diff --git a/go.mod b/go.mod
index 3c8082346..418874a99 100644
--- a/go.mod
+++ b/go.mod
@@ -1,75 +1,70 @@
 module github.com/rclone/rclone
 
 require (
-	bazil.org/fuse v0.0.0-20191225233854-3a99aca11732
-	cloud.google.com/go v0.47.0 // indirect
+	bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc
+	cloud.google.com/go v0.53.0 // indirect
 	github.com/Azure/azure-pipeline-go v0.2.2
 	github.com/Azure/azure-storage-blob-go v0.8.0
 	github.com/Azure/go-autorest/autorest/adal v0.6.0 // indirect
-	github.com/Unknwon/goconfig v0.0.0-20190425194916-3dba17dd7b9e
+	github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd
 	github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4
 	github.com/abbot/go-http-auth v0.4.0
 	github.com/anacrolix/dms v1.1.0
 	github.com/atotto/clipboard v0.1.2
-	github.com/aws/aws-sdk-go v1.25.31
+	github.com/aws/aws-sdk-go v1.29.9
 	github.com/billziss-gh/cgofuse v1.2.0
 	github.com/djherbis/times v1.2.0
-	github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible
+	github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible
 	github.com/etcd-io/bbolt v1.3.3
-	github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
-	github.com/google/go-cmp v0.3.1 // indirect
 	github.com/google/go-querystring v1.0.0 // indirect
 	github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect
 	github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54
-	github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2
+	github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3
 	github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
 	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
 	github.com/koofr/go-httpclient v0.0.0-20190818202018-e0dc8fd921dc
 	github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a
 	github.com/mattn/go-colorable v0.1.4
-	github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
-	github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 // indirect
-	github.com/mattn/go-runewidth v0.0.7
+	github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9 // indirect
+	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/mattn/go-runewidth v0.0.8
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2
-	github.com/ncw/swift v1.0.49
-	github.com/nsf/termbox-go v0.0.0-20191229070316-58d4fcbce2a7
+	github.com/ncw/swift v1.0.50
+	github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be
 	github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd
 	github.com/onsi/ginkgo v1.9.0 // indirect
 	github.com/onsi/gomega v1.6.0 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible
-	github.com/pkg/errors v0.8.1
-	github.com/pkg/sftp v1.10.1
-	github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877
+	github.com/pkg/errors v0.9.1
+	github.com/pkg/sftp v1.11.0
+	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
 	github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46
 	github.com/sevlyar/go-daemon v0.1.5
 	github.com/sirupsen/logrus v1.4.2
-	github.com/skratchdot/open-golang v0.0.0-20190402232053-79abb63cd66e
+	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
 	github.com/smartystreets/assertions v1.0.1 // indirect
 	github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
-	github.com/spf13/cobra v0.0.5
+	github.com/spf13/cobra v0.0.6
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.5.1
 	github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d
 	github.com/xanzy/ssh-agent v0.2.1
 	github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60
-	github.com/yunify/qingstor-sdk-go/v3 v3.1.1
+	github.com/yunify/qingstor-sdk-go/v3 v3.2.0
 	go.etcd.io/bbolt v1.3.3 // indirect
-	go.opencensus.io v0.22.2 // indirect
-	goftp.io/server v0.0.0-20190812052725-72a57b186803
-	golang.org/x/crypto v0.0.0-20200109152110-61a87790db17
-	golang.org/x/net v0.0.0-20191109021931-daa7c04131f5
-	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
+	goftp.io/server v0.3.2
+	golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
+	golang.org/x/net v0.0.0-20200222125558-5a598a2470a0
+	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
 	golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
-	golang.org/x/sys v0.0.0-20191210023423-ac6580df4449
+	golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae
 	golang.org/x/text v0.3.2
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
-	google.golang.org/api v0.13.0
-	google.golang.org/appengine v1.6.5 // indirect
-	google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a // indirect
-	google.golang.org/grpc v1.25.1 // indirect
-	gopkg.in/yaml.v2 v2.2.5
+	google.golang.org/api v0.18.0
+	google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect
+	gopkg.in/yaml.v2 v2.2.8
 )
 
 go 1.13
diff --git a/go.sum b/go.sum
index 971a3a1ea..62c8f910c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-bazil.org/fuse v0.0.0-20191225233854-3a99aca11732 h1:gaB1+kZCJDExjlrdy37gIwxV0M7v81EzIFKQZ5o5YV0=
-bazil.org/fuse v0.0.0-20191225233854-3a99aca11732/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc h1:utDghgcjE8u+EBjHOgYT+dJPcnDF05KqWMBcjuJy510=
+bazil.org/fuse v0.0.0-20200117225306-7b5117fecadc/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -7,16 +7,17 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
 cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
 cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
-cloud.google.com/go v0.47.0 h1:1JUtpcY9E7+eTospEwWS2QXP3DEn7poB3E2j0jN74mM=
-cloud.google.com/go v0.47.0/go.mod h1:5p3Ky/7f3N10VBkhuR5LFtddroTiMyjZV/Kj5qOQFxU=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-gitea.com/goftp/file-driver v0.0.0-20190712091345-f79c2ed973f8/go.mod h1:ghdogu0Da3rwYCSJ20JPgTiMcDpzeRbzvuFIOOW3G7w=
-gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34 h1:3wshUWDKHcy8hrNafCS4rtuAdON2KYsuznc05zdHTrQ=
-gitea.com/goftp/file-driver v0.0.0-20190812052443-efcdcba68b34/go.mod h1:6+f1gclV97PmaVmE4YJbH3KIKnl+r3/HWR0zD/z1CG4=
 github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
 github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
 github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
@@ -38,75 +39,100 @@ github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1Gn
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
 github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w=
-github.com/Unknwon/goconfig v0.0.0-20190425194916-3dba17dd7b9e h1:ZaFHdRwv6wJQMYsg5qITIsqWRqZRvUETiq0xxrl+8fc=
-github.com/Unknwon/goconfig v0.0.0-20190425194916-3dba17dd7b9e/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
+github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd h1:+CYOsXi89xOqBkj7CuEJjA2It+j+R3ngUZEydr6mtkw=
+github.com/Unknwon/goconfig v0.0.0-20191126170842-860a72fb44fd/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
 github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4 h1:mK1/QgFPU4osbhjJ26B1w738kjQHaGJcon8uCLMS8fk=
 github.com/a8m/tree v0.0.0-20181222104329-6a0b80129de4/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg=
 github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
 github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/anacrolix/dms v1.1.0 h1:vbBXZS7T5FaZm+9p1pdmVVo9tN3qdc27bKSETdeT3xo=
 github.com/anacrolix/dms v1.1.0/go.mod h1:msPKAoppoNRfrYplJqx63FZ+VipDZ4Xsj3KzIQxyU7k=
 github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
 github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c=
+github.com/anacrolix/ffprobe v1.0.0 h1:j8fGLBsXejwdXd0pkA9iR3Dt1XwMFv5wjeYWObcue8A=
 github.com/anacrolix/ffprobe v1.0.0/go.mod h1:BIw+Bjol6CWjm/CRWrVLk2Vy+UYlkgmBZ05vpSYqZPw=
 github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
 github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/atotto/clipboard v0.1.2 h1:YZCtFu5Ie8qX2VmVTBnrqLSiU9XOWwqNRmdT3gIQzbY=
 github.com/atotto/clipboard v0.1.2/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
-github.com/aws/aws-sdk-go v1.25.31 h1:14mdh3HsTgRekePPkYcCbAaEXJknc3mN7f4XfsiMMDA=
-github.com/aws/aws-sdk-go v1.25.31/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.9 h1:PHq9ddjfZYfCOXyqHKiCZ1CHRAk7nXhV7WTqj5l+bmQ=
+github.com/aws/aws-sdk-go v1.29.9/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl/7IA=
 github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM=
 github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
 github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/djherbis/times v1.2.0 h1:xANXjsC/iBqbO00vkWlYwPWgBgEVU6m6AFYg0Pic+Mc=
 github.com/djherbis/times v1.2.0/go.mod h1:CGMZlo255K5r4Yw0b9RRfFQpM2y7uOmxg4jm9HsaVf8=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible h1:9jnukMIowLSo3SY7+GTwxmYJv4QC0LxXbo97zHWCyoc=
-github.com/dropbox/dropbox-sdk-go-unofficial v5.4.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
+github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible h1:DtumzkLk2zZ2SeElEr+VNz+zV7l+BTe509cV4sKPXbM=
+github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY=
 github.com/dustin/go-humanize v0.0.0-20180421182945-02af3965c54e/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
 github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9 h1:cC0Hbb+18DJ4i6ybqDybvj4wdIDS4vnD0QEci98PgM8=
-github.com/goftp/file-driver v0.0.0-20180502053751-5d604a0fc0c9/go.mod h1:GpOj6zuVBG3Inr9qjEnuVTgBlk2lZ1S9DcoFiXWyKss=
-github.com/goftp/server v0.0.0-20190304020633-eabccc535b5a/go.mod h1:k/SS6VWkxY7dHPhoMQ8IdRu8L4lQtmGbhyXGg+vCnXE=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
-github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -116,11 +142,14 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
@@ -129,6 +158,10 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
 github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk=
 github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=
 github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
 github.com/hanwen/go-fuse/v2 v2.0.3-0.20191108143333-152e6ac32d54 h1:0JL4/kY3QKTRevfl0IbEncTzA+jczGba+swfDBBluuU=
@@ -140,21 +173,26 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
-github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2 h1:WY3P4euRv9s8F2rpZUK1jnk4ZMiV3O2ltdnoZK/GTUU=
-github.com/jlaffaye/ftp v0.0.0-20191025175106-a59fe673c9b2/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
+github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3 h1:QyB6CQGLB65Al72mAIbqrkGRk56JdGMHgBziM3F0FCw=
+github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6 h1:RyOL4+OIUc6u5ac2LclitlZvFES6k+sg18fBMfxFUUs=
 github.com/jzelinskie/whirlpool v0.0.0-20170603002051-c19460b8caa6/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -166,6 +204,7 @@ github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a h1:02cx9xF4W2
 github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
 github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -178,25 +217,29 @@ github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaa
 github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
 github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
 github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
-github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=
-github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
+github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9 h1:a6/tH/zhh2tdoUSDgS4gNcY6H2Mae/70R+jEkRv52ws=
+github.com/mattn/go-ieproxy v0.0.0-20200203040449-2dbc853185d9/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
 github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9 h1:tM1L+QoyOIq/0KiBQ4y/jUW0jxB5kz35bz+PSoQYjq8=
-github.com/mattn/go-isatty v0.0.11-0.20191112051248-2a2f0ea997f9/go.mod h1:cxQpGCW53krnBJYXw0m6SYdk+OIHR4jbEstSUj/+MQ4=
-github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54=
-github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
+github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
 github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
-github.com/ncw/swift v1.0.49 h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=
-github.com/ncw/swift v1.0.49/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA=
+github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=
 github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8=
-github.com/nsf/termbox-go v0.0.0-20191229070316-58d4fcbce2a7 h1:OkWEy7aQeQTbgdrcGi9bifx+Y6bMM7ae7y42hDFaBvA=
-github.com/nsf/termbox-go v0.0.0-20191229070316-58d4fcbce2a7/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
+github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be h1:yzmWtPyxEUIKdZg4RcPq64MfS8NA6A5fNOJgYhpR9EQ=
+github.com/nsf/termbox-go v0.0.0-20200204031403-4d2b513ad8be/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -210,44 +253,63 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
 github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
 github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
 github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
+github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877 h1:sKIa5MAIViLAnQbEo+uiDi2FMowy8KcdZW8XZpmyNxs=
-github.com/putdotio/go-putio v0.0.0-20190822121956-19b9c636c877/go.mod h1:EWtDL88jJLLWZzywr0QaPO+mGP8gFpvl8dcox8qTk3Y=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
+github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
 github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46 h1:w2CpS5muK+jyydnmlkqpAhzKmHmMBzBkfYUDjQNS1Dk=
 github.com/rfjakob/eme v0.0.0-20171028163933-2222dbd4ba46/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=
 github.com/sevlyar/go-daemon v0.1.5 h1:Zy/6jLbM8CfqJ4x4RPr7MJlSKt90f00kNM1D401C+Qk=
 github.com/sevlyar/go-daemon v0.1.5/go.mod h1:6dJpPatBT9eUwM5VCw9Bt6CdX9Tk6UWvhW3MebLDRKE=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/skratchdot/open-golang v0.0.0-20190402232053-79abb63cd66e h1:VAzdS5Nw68fbf5RZ8RDVlUvPXNU6Z3jtPCK/qvm4FoQ=
-github.com/skratchdot/open-golang v0.0.0-20190402232053-79abb63cd66e/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
+github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=
 github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
 github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
 github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs=
+github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -257,20 +319,25 @@ github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709 h1:Ko2LQMrRU+Oy
 github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d h1:GsRmok9VXIEc5B6SmRWuuO9hx4x2YBqFqgOXGt9Xs94=
 github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 h1:Ud2neINE1YFEwrcJ4EqnbRZlm9R3T8SuFKeqjIw7k44=
 github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
-github.com/yunify/qingstor-sdk-go/v3 v3.1.1 h1:jQkY9N+zSL8h8CqgrDQpXe8/mqJOx8vgGjk6O//RA/4=
-github.com/yunify/qingstor-sdk-go/v3 v3.1.1/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
+github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8=
+github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -278,11 +345,14 @@ go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-goftp.io/server v0.0.0-20190712054601-1149070ae46b/go.mod h1:xreggPYu7ZuNe9PfbxiQca7bYGwU44IvlCCg3KzWJtQ=
-goftp.io/server v0.0.0-20190812034929-9b3874d17690/go.mod h1:99FISrRpwKfaL4Ey/dX8N48WToveng/s2OXR5sJ3cnc=
-goftp.io/server v0.0.0-20190812052725-72a57b186803 h1:I2IgXYRuOZ6LceE7VY6aSnYuUy6Wot3WFhqI5WsAHXQ=
-goftp.io/server v0.0.0-20190812052725-72a57b186803/go.mod h1:eDjthxa5tFTS2JVry2jHt1g9y3J0Vgu2Nd+lmNWev7Y=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+goftp.io/server v0.3.2 h1:bcsI4ijbvFZkA4rrUtIE/t1jNhT+0uSkiTQ4ASjZAXQ=
+goftp.io/server v0.3.2/go.mod h1:wfeAZeQgacupLVl+Ex3ozYFaAGNfCKYZiZNxLzgw6z4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -290,13 +360,17 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
 golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 h1:nVJ3guKA9qdkEQ3TUdXI9QSINo2CUPM/cySEvw2w8I0=
-golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
+golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -305,14 +379,21 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
 golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
 golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
 golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
 golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -320,15 +401,23 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
 golang.org/x/net v0.0.0-20190415214537-1da14a5a36f2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191109021931-daa7c04131f5 h1:bHNaocaoJxYBo5cw41UyTMLjYlb8wPY7+WFrnklbHOM=
-golang.org/x/net v0.0.0-20191109021931-daa7c04131f5/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0 h1:MsuvTghUPjX762sGLnGsxC3HM0B5r83wEtYcYR8/vRs=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -338,8 +427,10 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -351,9 +442,17 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ=
 golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -363,6 +462,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZe
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -378,17 +478,32 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191010171213-8abd42400456/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
 google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8=
 google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=
 google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0 h1:TgDr+1inK2XVUKZx3BYAqQg/GwucGdBkzZjWaTg/I+A=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -405,33 +520,48 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
-google.golang.org/genproto v0.0.0-20191009194640-548a555dbc03/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
 google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4=
 google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 h1:1+0Z5cgv1eDXJD9z2tdQF9PSSQnJXwism490hJydMRI=
+google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
 google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1 h1:wdKvqQk7IttEw92GoRyKG2IDrUIpgpj6H6m81yfeMW0=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
-gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/bazil.org/fuse/fs/serve.go b/vendor/bazil.org/fuse/fs/serve.go
index ac3efd311..033b8d59d 100644
--- a/vendor/bazil.org/fuse/fs/serve.go
+++ b/vendor/bazil.org/fuse/fs/serve.go
@@ -14,6 +14,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 
 	"bazil.org/fuse"
@@ -467,7 +468,6 @@ func (sn *serveNode) attr(ctx context.Context, attr *fuse.Attr) error {
 type serveHandle struct {
 	handle   Handle
 	readData []byte
-	nodeID   fuse.NodeID
 }
 
 // NodeRef is deprecated. It remains here to decrease code churn on
@@ -501,9 +501,9 @@ func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64)
 	return id, sn.generation
 }
 
-func (c *Server) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) {
+func (c *Server) saveHandle(handle Handle) (id fuse.HandleID) {
 	c.meta.Lock()
-	shandle := &serveHandle{handle: handle, nodeID: nodeID}
+	shandle := &serveHandle{handle: handle}
 	if n := len(c.freeHandle); n > 0 {
 		id = c.freeHandle[n-1]
 		c.freeHandle = c.freeHandle[:n-1]
@@ -756,7 +756,7 @@ func (e handleNotReaderError) Error() string {
 var _ fuse.ErrorNumber = handleNotReaderError{}
 
 func (e handleNotReaderError) Errno() fuse.Errno {
-	return fuse.ENOTSUP
+	return fuse.Errno(syscall.ENOTSUP)
 }
 
 func initLookupResponse(s *fuse.LookupResponse) {
@@ -788,10 +788,11 @@ func (c *Server) serve(r fuse.Request) {
 		}
 		if snode == nil {
 			c.meta.Unlock()
+			err := syscall.ESTALE
 			c.debug(response{
 				Op:      opName(r),
 				Request: logResponseHeader{ID: hdr.ID},
-				Error:   fuse.ESTALE.ErrnoName(),
+				Error:   fuse.Errno(err).ErrnoName(),
 				// this is the only place that sets both Error and
 				// Out; not sure if i want to do that; might get rid
 				// of len(c.node) things altogether
@@ -799,7 +800,7 @@ func (c *Server) serve(r fuse.Request) {
 					MaxNode: fuse.NodeID(len(c.node)),
 				},
 			})
-			r.RespondError(fuse.ESTALE)
+			r.RespondError(err)
 			return
 		}
 		node = snode.node
@@ -824,17 +825,12 @@ func (c *Server) serve(r fuse.Request) {
 			Request: logResponseHeader{ID: hdr.ID},
 		}
 		if err, ok := resp.(error); ok {
-			msg.Error = err.Error()
-			if ferr, ok := err.(fuse.ErrorNumber); ok {
-				errno := ferr.Errno()
-				msg.Errno = errno.ErrnoName()
-				if errno == err {
-					// it's just a fuse.Errno with no extra detail;
-					// skip the textual message for log readability
-					msg.Error = ""
-				}
-			} else {
-				msg.Errno = fuse.DefaultErrno.ErrnoName()
+			errno := fuse.ToErrno(err)
+			msg.Errno = errno.ErrnoName()
+			if errno != err && syscall.Errno(errno) != err {
+				// if it's more than just a fuse.Errno or a
+				// syscall.Errno, log extra detail
+				msg.Error = err.Error()
 			}
 		} else {
 			msg.Out = resp
@@ -887,7 +883,7 @@ func (c *Server) serve(r fuse.Request) {
 			//
 			// Decent write-up on role of EINTR:
 			// http://250bpm.com/blog:12
-			err = fuse.EINTR
+			err = syscall.EINTR
 		default:
 			// nothing
 		}
@@ -907,7 +903,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
 		// Note: To FUSE, ENOSYS means "this server never implements this request."
 		// It would be inappropriate to return ENOSYS for other operations in this
 		// switch that might only be unavailable in some contexts, not all.
-		return fuse.ENOSYS
+		return syscall.ENOSYS
 
 	case *fuse.StatfsRequest:
 		s := &fuse.StatfsResponse{}
@@ -956,7 +952,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
 		initLookupResponse(&s.LookupResponse)
 		n, ok := node.(NodeSymlinker)
 		if !ok {
-			return fuse.EIO // XXX or EPERM like Mkdir?
+			return syscall.EIO // XXX or EPERM like Mkdir?
 		}
 		n2, err := n.Symlink(ctx, r)
 		if err != nil {
@@ -972,7 +968,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
 	case *fuse.ReadlinkRequest:
 		n, ok := node.(NodeReadlinker)
 		if !ok {
-			return fuse.EIO /// XXX or EPERM?
+			return syscall.EIO /// XXX or EPERM?
 		}
 		target, err := n.Readlink(ctx, r)
 		if err != nil {
@@ -985,7 +981,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode,
 	case *fuse.LinkRequest:
 		n, ok := node.(NodeLinker)
 		if !ok {
-			return fuse.EIO /// XXX or EPERM?
+ return syscall.EIO /// XXX or EPERM? } c.meta.Lock() var oldNode *serveNode @@ -998,7 +994,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, Request: r.Hdr(), In: r, }) - return fuse.EIO + return syscall.EIO } n2, err := n.Link(ctx, r, oldNode.node) if err != nil { @@ -1016,7 +1012,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.RemoveRequest: n, ok := node.(NodeRemover) if !ok { - return fuse.EIO /// XXX or EPERM? + return syscall.EIO /// XXX or EPERM? } err := n.Remove(ctx, r) if err != nil { @@ -1046,7 +1042,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, } else if n, ok := node.(NodeRequestLookuper); ok { n2, err = n.Lookup(ctx, r, s) } else { - return fuse.ENOENT + return syscall.ENOENT } if err != nil { return err @@ -1063,7 +1059,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, initLookupResponse(&s.LookupResponse) n, ok := node.(NodeMkdirer) if !ok { - return fuse.EPERM + return syscall.EPERM } n2, err := n.Mkdir(ctx, r) if err != nil { @@ -1088,7 +1084,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, } else { h2 = node } - s.Handle = c.saveHandle(h2, r.Hdr().Node) + s.Handle = c.saveHandle(h2) done(s) r.Respond(s) return nil @@ -1097,7 +1093,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, n, ok := node.(NodeCreater) if !ok { // If we send back ENOSYS, FUSE will try mknod+open. - return fuse.EPERM + return syscall.EPERM } s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} initLookupResponse(&s.LookupResponse) @@ -1108,7 +1104,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { return err } - s.Handle = c.saveHandle(h2, r.Hdr().Node) + s.Handle = c.saveHandle(h2) done(s) r.Respond(s) return nil @@ -1116,7 +1112,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.GetxattrRequest: n, ok := node.(NodeGetxattrer) if !ok { - return fuse.ENOTSUP + return syscall.ENOTSUP } s := &fuse.GetxattrResponse{} err := n.Getxattr(ctx, r, s) @@ -1124,7 +1120,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, return err } if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { - return fuse.ERANGE + return syscall.ERANGE } done(s) r.Respond(s) @@ -1133,7 +1129,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.ListxattrRequest: n, ok := node.(NodeListxattrer) if !ok { - return fuse.ENOTSUP + return syscall.ENOTSUP } s := &fuse.ListxattrResponse{} err := n.Listxattr(ctx, r, s) @@ -1141,7 +1137,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, return err } if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { - return fuse.ERANGE + return syscall.ERANGE } done(s) r.Respond(s) @@ -1150,7 +1146,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.SetxattrRequest: n, ok := node.(NodeSetxattrer) if !ok { - return fuse.ENOTSUP + return syscall.ENOTSUP } err := n.Setxattr(ctx, r) if err != nil { @@ -1163,7 +1159,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.RemovexattrRequest: n, ok := node.(NodeRemovexattrer) if !ok { - return fuse.ENOTSUP + return syscall.ENOTSUP } err := n.Removexattr(ctx, r) 
if err != nil { @@ -1189,7 +1185,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.ReadRequest: shandle := c.getHandle(r.Handle) if shandle == nil { - return fuse.ESTALE + return syscall.ESTALE } handle := shandle.handle @@ -1254,7 +1250,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.WriteRequest: shandle := c.getHandle(r.Handle) if shandle == nil { - return fuse.ESTALE + return syscall.ESTALE } s := &fuse.WriteResponse{} @@ -1266,12 +1262,12 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, r.Respond(s) return nil } - return fuse.EIO + return syscall.EIO case *fuse.FlushRequest: shandle := c.getHandle(r.Handle) if shandle == nil { - return fuse.ESTALE + return syscall.ESTALE } handle := shandle.handle @@ -1287,7 +1283,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.ReleaseRequest: shandle := c.getHandle(r.Handle) if shandle == nil { - return fuse.ESTALE + return syscall.ESTALE } handle := shandle.handle @@ -1323,11 +1319,11 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, Request: r.Hdr(), In: r, }) - return fuse.EIO + return syscall.EIO } n, ok := node.(NodeRenamer) if !ok { - return fuse.EIO // XXX or EPERM like Mkdir? + return syscall.EIO // XXX or EPERM like Mkdir? } err := n.Rename(ctx, r, newDirNode.node) if err != nil { @@ -1340,7 +1336,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.MknodRequest: n, ok := node.(NodeMknoder) if !ok { - return fuse.EIO + return syscall.EIO } n2, err := n.Mknod(ctx, r) if err != nil { @@ -1358,7 +1354,7 @@ func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, case *fuse.FsyncRequest: n, ok := node.(NodeFsyncer) if !ok { - return fuse.EIO + return syscall.EIO } err := n.Fsync(ctx, r) if err != nil { diff --git a/vendor/bazil.org/fuse/fs/tree.go b/vendor/bazil.org/fuse/fs/tree.go index f20f298af..329be1389 100644 --- a/vendor/bazil.org/fuse/fs/tree.go +++ b/vendor/bazil.org/fuse/fs/tree.go @@ -7,6 +7,7 @@ import ( "os" pathpkg "path" "strings" + "syscall" "bazil.org/fuse" ) @@ -84,7 +85,7 @@ func (t *tree) Lookup(ctx context.Context, name string) (Node, error) { if n != nil { return n, nil } - return nil, fuse.ENOENT + return nil, syscall.ENOENT } func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { diff --git a/vendor/bazil.org/fuse/fuse.go b/vendor/bazil.org/fuse/fuse.go index cd5833843..b774a25c5 100644 --- a/vendor/bazil.org/fuse/fuse.go +++ b/vendor/bazil.org/fuse/fuse.go @@ -79,7 +79,7 @@ // is cancelled and no longer needed, the context will be cancelled. // Blocking operations should select on a receive from ctx.Done() and attempt to // abort the operation early if the receive succeeds (meaning the channel is closed). -// To indicate that the operation failed because it was aborted, return fuse.EINTR. +// To indicate that the operation failed because it was aborted, return syscall.EINTR. // // If an operation does not block for an indefinite amount of time, supporting // cancellation is not necessary. @@ -90,8 +90,8 @@ // inspect req.Pid, req.Uid, and req.Gid as necessary to implement // permission checking. The kernel FUSE layer normally prevents other // users from accessing the FUSE file system (to change this, see -// AllowOther, AllowRoot), but does not enforce access modes (to -// change this, see DefaultPermissions). 
+// AllowOther), but does not enforce access modes (to change this, see +// DefaultPermissions). // // Mount Options // @@ -323,6 +323,8 @@ type ErrorNumber interface { Errno() Errno } +// Deprecated: Return a syscall.Errno directly. See ToErrno for exact +// rules. const ( // ENOSYS indicates that the call is not supported. ENOSYS = Errno(syscall.ENOSYS) @@ -348,13 +350,14 @@ const ( const DefaultErrno = EIO var errnoNames = map[Errno]string{ - ENOSYS: "ENOSYS", - ESTALE: "ESTALE", - ENOENT: "ENOENT", - EIO: "EIO", - EPERM: "EPERM", - EINTR: "EINTR", - EEXIST: "EEXIST", + ENOSYS: "ENOSYS", + ESTALE: "ESTALE", + ENOENT: "ENOENT", + EIO: "EIO", + EPERM: "EPERM", + EINTR: "EINTR", + EEXIST: "EEXIST", + Errno(syscall.ENAMETOOLONG): "ENAMETOOLONG", } // Errno implements Error and ErrorNumber using a syscall.Errno. @@ -390,11 +393,28 @@ func (e Errno) MarshalText() ([]byte, error) { return []byte(s), nil } -func (h *Header) RespondError(err error) { - errno := DefaultErrno - if ferr, ok := err.(ErrorNumber); ok { - errno = ferr.Errno() +// ToErrno converts arbitrary errors to Errno. +// +// If the underlying type of err is syscall.Errno, it is used +// directly. No unwrapping is done, to prevent wrong errors from +// leaking via e.g. *os.PathError. +// +// If err unwraps to implement ErrorNumber, that is used. +// +// Finally, returns DefaultErrno. +func ToErrno(err error) Errno { + if err, ok := err.(syscall.Errno); ok { + return Errno(err) } + var errnum ErrorNumber + if errors.As(err, &errnum) { + return Errno(errnum.Errno()) + } + return DefaultErrno +} + +func (h *Header) RespondError(err error) { + errno := ToErrno(err) // FUSE uses negative errors! // TODO: File bug report against OSXFUSE: positive error causes kernel panic. buf := newBuffer(0) diff --git a/vendor/bazil.org/fuse/fuse_kernel.go b/vendor/bazil.org/fuse/fuse_kernel.go index 44b1fec0f..416d32aa1 100644 --- a/vendor/bazil.org/fuse/fuse_kernel.go +++ b/vendor/bazil.org/fuse/fuse_kernel.go @@ -633,8 +633,6 @@ func (fl WriteFlags) String() string { return flagString(uint32(fl), writeFlagNames) } -const compatStatfsSize = 48 - type statfsOut struct { St kstatfs } diff --git a/vendor/bazil.org/fuse/options.go b/vendor/bazil.org/fuse/options.go index d7abf2b7f..f09ffd4ed 100644 --- a/vendor/bazil.org/fuse/options.go +++ b/vendor/bazil.org/fuse/options.go @@ -148,37 +148,14 @@ func DaemonTimeout(name string) MountOption { return daemonTimeout(name) } -var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot") - // AllowOther allows other users to access the file system. -// -// Only one of AllowOther or AllowRoot can be used. func AllowOther() MountOption { return func(conf *mountConfig) error { - if _, ok := conf.options["allow_root"]; ok { - return ErrCannotCombineAllowOtherAndAllowRoot - } conf.options["allow_other"] = "" return nil } } -// AllowRoot allows root (but not just everyone) to access the file -// system. -// -// Only one of AllowOther or AllowRoot can be used. -// -// FreeBSD ignores this option. -func AllowRoot() MountOption { - return func(conf *mountConfig) error { - if _, ok := conf.options["allow_other"]; ok { - return ErrCannotCombineAllowOtherAndAllowRoot - } - conf.options["allow_root"] = "" - return nil - } -} - // AllowDev enables interpreting character or block special devices on the // filesystem. func AllowDev() MountOption { @@ -202,7 +179,7 @@ func AllowSUID() MountOption { // // Without this option, the Node itself decides what is and is not // allowed. 
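Editor's note: the ToErrno hunk above is the heart of the bazil.org/fuse error migration: a raw syscall.Errno is used as-is, anything that unwraps (errors.As) to a fuse.ErrorNumber is honored, and everything else collapses to DefaultErrno (EIO); the old fuse.ENOSYS-style constants are now deprecated in favor of returning syscall errnos directly. A minimal sketch of what that means for callers, assuming a vendored build of this bazil.org/fuse revision:

    package main

    import (
        "errors"
        "fmt"
        "syscall"

        "bazil.org/fuse"
    )

    func main() {
        // A raw syscall.Errno passes through ToErrno unchanged; this is
        // the new convention for errors returned from FS methods.
        fmt.Println(fuse.ToErrno(syscall.ENOENT) == fuse.Errno(syscall.ENOENT)) // true

        // Wrapped errors are unwrapped via errors.As until an
        // ErrorNumber implementation is found.
        wrapped := fmt.Errorf("lookup failed: %w", fuse.Errno(syscall.ESTALE))
        fmt.Println(fuse.ToErrno(wrapped) == fuse.Errno(syscall.ESTALE)) // true

        // Anything else collapses to DefaultErrno (EIO).
        fmt.Println(fuse.ToErrno(errors.New("opaque")) == fuse.DefaultErrno) // true
    }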
This is normally ok because FUSE file systems cannot be -// accessed by other users without AllowOther/AllowRoot. +// accessed by other users without AllowOther. // // FreeBSD ignores this option. func DefaultPermissions() MountOption { diff --git a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json b/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json deleted file mode 100644 index ca022ccc4..000000000 --- a/vendor/cloud.google.com/go/compute/metadata/.repo-metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "metadata", - "name_pretty": "Google Compute Engine Metadata API", - "product_documentation": "https://cloud.google.com/compute/docs/storing-retrieving-metadata", - "client_documentation": "https://godoc.org/cloud.google.com/go/compute/metadata", - "release_level": "ga", - "language": "go", - "repo": "googleapis/google-cloud-go", - "distribution_name": "cloud.google.com/go/compute/metadata", - "api_id": "compute:metadata", - "requires_billing": false -} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 4ff4e2f1c..9b1afb5cc 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -68,7 +68,6 @@ var ( Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, - ResponseHeaderTimeout: 2 * time.Second, }, }} subscribeClient = &Client{hc: &http.Client{ @@ -304,7 +303,10 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { host = metadataIP } u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return "", "", err + } req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) res, err := c.hc.Do(req) @@ -407,11 +409,7 @@ func (c *Client) InstanceTags() ([]string, error) { // InstanceName returns the current VM's instance ID string. func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil + return c.getTrimmed("instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". diff --git a/vendor/github.com/Unknwon/goconfig/README.md b/vendor/github.com/Unknwon/goconfig/README.md index da6f14470..3a2218cb0 100644 --- a/vendor/github.com/Unknwon/goconfig/README.md +++ b/vendor/github.com/Unknwon/goconfig/README.md @@ -1,4 +1,4 @@ -goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig) +goconfig [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig) ======== [中文文档](README_ZH.md) @@ -26,15 +26,11 @@ The configuration file consists of sections, led by a `[section]` header and fol ## Installation - go get github.com/Unknwon/goconfig - -Or - - gopm get github.com/Unknwon/goconfig + go get github.com/unknwon/goconfig ## API Documentation -[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig). +[Go Walker](http://gowalker.org/github.com/unknwon/goconfig). 
## Example diff --git a/vendor/github.com/anacrolix/dms/upnpav/upnpav.go b/vendor/github.com/anacrolix/dms/upnpav/upnpav.go deleted file mode 100644 index 2ba7f8ade..000000000 --- a/vendor/github.com/anacrolix/dms/upnpav/upnpav.go +++ /dev/null @@ -1,45 +0,0 @@ -package upnpav - -import ( - "encoding/xml" -) - -const ( - NoSuchObjectErrorCode = 701 -) - -type Resource struct { - XMLName xml.Name `xml:"res"` - ProtocolInfo string `xml:"protocolInfo,attr"` - URL string `xml:",chardata"` - Size uint64 `xml:"size,attr,omitempty"` - Bitrate uint `xml:"bitrate,attr,omitempty"` - Duration string `xml:"duration,attr,omitempty"` - Resolution string `xml:"resolution,attr,omitempty"` -} - -type Container struct { - Object - XMLName xml.Name `xml:"container"` - ChildCount int `xml:"childCount,attr"` -} - -type Item struct { - Object - XMLName xml.Name `xml:"item"` - Res []Resource -} - -type Object struct { - ID string `xml:"id,attr"` - ParentID string `xml:"parentID,attr"` - Restricted int `xml:"restricted,attr"` // indicates whether the object is modifiable - Class string `xml:"upnp:class"` - Icon string `xml:"upnp:icon,omitempty"` - Title string `xml:"dc:title"` - Artist string `xml:"upnp:artist,omitempty"` - Album string `xml:"upnp:album,omitempty"` - Genre string `xml:"upnp:genre,omitempty"` - AlbumArtURI string `xml:"upnp:albumArtURI,omitempty"` - Searchable int `xml:"searchable,attr"` -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 000000000..1c4967429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. 
Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 285e54d67..a4eb6a7f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -70,7 +70,7 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer value = value.FieldByNameFunc(func(name string) bool { if c == name { return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + } else if !caseSensitive && strings.EqualFold(name, c) { return true } return false diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 8a7699b96..2def23fa1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -161,6 +161,17 @@ type Config struct { // on GetObject API calls. S3DisableContentMD5Validation *bool + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + LowerCaseHeaderMaps *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. 
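Editor's note: the new arn package above is self-contained, so its use is easy to sketch. Parse splits on at most six colon-delimited sections, which is why a resource name may itself contain colons or slashes; both ARNs below are taken from the package's own doc comments:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/arn"
    )

    func main() {
        a, err := arn.Parse("arn:aws:s3:::my_corporate_bucket/exampleobject.png")
        if err != nil {
            panic(err)
        }
        // S3 ARNs leave the region and account-ID sections empty.
        fmt.Printf("service=%q region=%q resource=%q\n", a.Service, a.Region, a.Resource)

        fmt.Println(arn.IsARN("arn:aws:iam::123456789012:user/David"))                 // true
        fmt.Println(a.String() == "arn:aws:s3:::my_corporate_bucket/exampleobject.png") // true
    }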
This options is only @@ -249,6 +260,9 @@ type Config struct { // STSRegionalEndpoint will enable regional or legacy endpoint resolving STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder @@ -382,6 +396,13 @@ func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { } +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + // WithUseDualStack sets a config UseDualStack value returning a Config // pointer for chaining. func (c *Config) WithUseDualStack(enable bool) *Config { @@ -430,6 +451,13 @@ func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Con return c } +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -503,6 +531,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation } + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -534,6 +566,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { dst.STSRegionalEndpoint = other.STSRegionalEndpoint } + + if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint { + dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go index 66c5945db..2f9446333 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -2,42 +2,8 @@ package aws -import "time" - -// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to -// provide a 1.6 and 1.5 safe version of context that is compatible with Go -// 1.7's Context. -// -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case backgroundCtx: - return "aws.BackgroundContext" - } - return "unknown empty Context" -} - -var ( - backgroundCtx = new(emptyCtx) +import ( + "github.com/aws/aws-sdk-go/internal/context" ) // BackgroundContext returns a context that will never be canceled, has no @@ -52,5 +18,5 @@ var ( // // See https://golang.org/pkg/context for more information on Contexts. 
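Editor's note: S3UseARNRegion above follows the SDK's usual three-layer option pattern: a *bool field on Config, a WithS3UseARNRegion chaining setter, and a nil-aware mergeInConfig clause so a per-client Config can override session defaults. A sketch of opting in at session construction (service client usage elided):

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // When a bucket parameter is an ARN (e.g. an S3 access point),
        // resolve the request region from the ARN rather than from the
        // client's configured region.
        sess := session.Must(session.NewSession(
            aws.NewConfig().WithS3UseARNRegion(true),
        ))
        _ = sess
    }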
func BackgroundContext() Context { - return backgroundCtx + return context.BackgroundCtx } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 0c60e612e..aa902d708 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -161,7 +161,7 @@ func handleSendError(r *request.Request, err error) { } // Catch all request errors, and let the default retrier determine // if the error is retryable. - r.Error = awserr.New("RequestError", "send request failed", err) + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) // Override the error with a context canceled error, if that was canceled. ctx := r.Context() diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go new file mode 100644 index 000000000..5852b2648 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go @@ -0,0 +1,22 @@ +// +build !go1.7 + +package credentials + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go new file mode 100644 index 000000000..388b21541 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package credentials + +import "context" + +// backgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func backgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go new file mode 100644 index 000000000..8152a864a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go @@ -0,0 +1,39 @@ +// +build !go1.9 + +package credentials + +import "time" + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. 
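Editor's note: the corehandlers and csm hunks above replace the literal "RequestError" string with the exported request.ErrCodeRequestError constant, so callers can match send failures without a magic string. A small runnable sketch of checking for that code:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    // isSendFailure reports whether err carries the SDK's generic
    // "send request failed" code, matched via the exported constant.
    func isSendFailure(err error) bool {
        if aerr, ok := err.(awserr.Error); ok {
            return aerr.Code() == request.ErrCodeRequestError
        }
        return false
    }

    func main() {
        err := awserr.New(request.ErrCodeRequestError, "send request failed", nil)
        fmt.Println(isSendFailure(err)) // true
    }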
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go new file mode 100644 index 000000000..4356edb3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package credentials + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// This type, aws.Context, and context.Context are equivalent. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 4af592158..c75d7bba0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -50,10 +50,11 @@ package credentials import ( "fmt" - "sync" + "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/sync/singleflight" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -197,20 +198,62 @@ func (e *Expiry) ExpiresAt() time.Time { // first instance of the credentials Value. All calls to Get() after that // will return the cached credentials Value until IsExpired() returns true. type Credentials struct { - creds Value - forceRefresh bool - - m sync.RWMutex + creds atomic.Value + sf singleflight.Group provider Provider } // NewCredentials returns a pointer to a new Credentials with the provider set. func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, + c := &Credentials{ + provider: provider, } + c.creds.Store(Value{}) + return c +} + +// GetWithContext returns the credentials value, or error if the credentials +// Value failed to be retrieved. Will return early if the passed in context is +// canceled. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. 
+// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +// +// Passed in Context is equivalent to aws.Context, and context.Context. +func (c *Credentials) GetWithContext(ctx Context) (Value, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + // Cannot pass context down to the actual retrieve, because the first + // context would cancel the whole group when there is not direct + // association of items in the group. + resCh := c.sf.DoChan("", c.singleRetrieve) + select { + case res := <-resCh: + return res.Val.(Value), res.Err + case <-ctx.Done(): + return Value{}, awserr.New("RequestCanceled", + "request context canceled", ctx.Err()) + } +} + +func (c *Credentials) singleRetrieve() (interface{}, error) { + if curCreds := c.creds.Load(); !c.isExpired(curCreds) { + return curCreds.(Value), nil + } + + creds, err := c.provider.Retrieve() + if err == nil { + c.creds.Store(creds) + } + + return creds, err } // Get returns the credentials value, or error if the credentials Value failed @@ -223,30 +266,7 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { - // Check the cached credentials first with just the read lock. - c.m.RLock() - if !c.isExpired() { - creds := c.creds - c.m.RUnlock() - return creds, nil - } - c.m.RUnlock() - - // Credentials are expired need to retrieve the credentials taking the full - // lock. - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil + return c.GetWithContext(backgroundContext()) } // Expire expires the credentials and forces them to be retrieved on the @@ -255,10 +275,7 @@ func (c *Credentials) Get() (Value, error) { // This will override the Provider's expired state, and force Credentials // to call the Provider's Retrieve(). func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true + c.creds.Store(Value{}) } // IsExpired returns if the credentials are no longer valid, and need @@ -267,31 +284,25 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.RLock() - defer c.m.RUnlock() - - return c.isExpired() + return c.isExpired(c.creds.Load()) } // isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() +func (c *Credentials) isExpired(creds interface{}) bool { + return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired() } // ExpiresAt provides access to the functionality of the Expirer interface of // the underlying Provider, if it supports that interface. Otherwise, it returns // an error. 
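Editor's note: the refactor above swaps the RWMutex for an atomic.Value snapshot plus a singleflight group, so an expired credential is refreshed by exactly one goroutine while concurrent callers either share its result or bail out with their context; note that the refresh deliberately keeps running in the group even if one caller's context expires, so other waiters can still consume it. From the caller's side, with static credentials as a stand-in provider:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        // Concurrent callers share a single Retrieve via the singleflight
        // group; a canceled ctx returns early with a RequestCanceled error.
        v, err := creds.GetWithContext(ctx)
        if err != nil {
            panic(err)
        }
        fmt.Println(v.ProviderName)
    }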
func (c *Credentials) ExpiresAt() (time.Time, error) { - c.m.RLock() - defer c.m.RUnlock() - expirer, ok := c.provider.(Expirer) if !ok { return time.Time{}, awserr.New("ProviderNotExpirer", - fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName), nil) } - if c.forceRefresh { + if c.creds.Load().(Value) == (Value{}) { // set expiration time to the distant past return time.Time{}, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index 1980c8c14..e62483600 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -90,6 +90,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -142,7 +143,7 @@ const ( // DefaultBufSize limits buffer size from growing to an enormous // amount due to a faulty process. - DefaultBufSize = 1024 + DefaultBufSize = int(8 * sdkio.KibiByte) // DefaultTimeout default limit on time a process can run. DefaultTimeout = time.Duration(1) * time.Minute diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 2e528d130..9f37f44bc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -144,6 +144,13 @@ type AssumeRoleProvider struct { // Session name, if you wish to reuse the credentials elsewhere. RoleSessionName string + // Optional, you can pass tag key-value pairs to your session. These tags are called session tags. + Tags []*sts.Tag + + // A list of keys for session tags that you want to set as transitive. + // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain. + TransitiveTagKeys []*string + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. 
Duration time.Duration @@ -269,10 +276,12 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) input := &sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), - RoleArn: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - ExternalId: p.ExternalID, + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + TransitiveTagKeys: p.TransitiveTagKeys, } if p.Policy != nil { input.Policy = p.Policy diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index 9186587fc..835bcd49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -89,7 +89,7 @@ func getMetricException(err awserr.Error) metricException { code := err.Code() switch code { - case "RequestError", + case request.ErrCodeRequestError, request.ErrCodeSerialization, request.CanceledErrorCode: return sdkException{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index d126764ce..12897eef6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "strconv" "strings" "time" @@ -12,8 +13,41 @@ import ( "github.com/aws/aws-sdk-go/internal/sdkuri" ) +// getToken uses the duration to return a token for EC2 metadata service, +// or an error if the request failed. +func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { + op := &request.Operation{ + Name: "GetToken", + HTTPMethod: "PUT", + HTTPPath: "/api/token", + } + + var output tokenOutput + req := c.NewRequest(op, nil, &output) + + // remove the fetch token handler from the request handlers to avoid infinite recursion + req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) + + // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request. + req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler) + + ttl := strconv.FormatInt(int64(duration/time.Second), 10) + req.HTTPRequest.Header.Set(ttlHeader, ttl) + + err := req.Send() + + // Errors with bad request status should be returned. + if err != nil { + err = awserr.NewRequestFailure( + awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err), + req.HTTPResponse.StatusCode, req.RequestID) + } + + return output, err +} + // GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or +// instance metadata service. The content will be returned as a string, or // error if the request failed. 
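Editor's note: the new Tags and TransitiveTagKeys fields flow straight into the AssumeRoleInput built in Retrieve above. A sketch of attaching session tags when assuming a role; the role ARN and tag values here are hypothetical:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())
        creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
            func(p *stscreds.AssumeRoleProvider) {
                // Session tags, plus a key that propagates through
                // subsequent assumed roles in a chain.
                p.Tags = []*sts.Tag{
                    {Key: aws.String("Project"), Value: aws.String("rclone")},
                }
                p.TransitiveTagKeys = []*string{aws.String("Project")}
            })
        _ = creds
    }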
func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ @@ -21,11 +55,11 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { HTTPMethod: "GET", HTTPPath: sdkuri.PathJoin("/meta-data", p), } - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - err := req.Send() + req := c.NewRequest(op, nil, output) + + err := req.Send() return output.Content, err } @@ -41,13 +75,8 @@ func (c *EC2Metadata) GetUserData() (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { - if r.HTTPResponse.StatusCode == http.StatusNotFound { - r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) - } - }) - err := req.Send() + err := req.Send() return output.Content, err } @@ -63,8 +92,8 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { output := &metadataOutput{} req := c.NewRequest(op, nil, output) - err := req.Send() + err := req.Send() return output.Content, err } @@ -116,17 +145,17 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument() if err != nil { return "", err } - - if len(resp) == 0 { - return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil + // returns region + return region, nil } // Available returns if the application has access to the EC2 Metadata service. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 4c5636e35..b8b2940d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -13,6 +13,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "time" @@ -24,9 +25,25 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" -const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,8 +80,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio // use a shorter timeout than default because the metadata // service is local if it is running, and to fail faster // if not running on an ec2 instance. 
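Editor's note: Region() above no longer guesses by trimming the availability-zone suffix (us-west-2a becoming us-west-2); it reads the region field of the instance identity document. Caller code is unchanged; a minimal check, run on an EC2 instance:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        meta := ec2metadata.New(sess)

        if !meta.Available() {
            fmt.Println("not running on EC2")
            return
        }
        region, err := meta.Region() // from the identity document, not the AZ name
        if err != nil {
            panic(err)
        }
        fmt.Println(region)
    }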
- Timeout: 5 * time.Second, + Timeout: 1 * time.Second, } + // max number of retries on the client operation + cfg.MaxRetries = aws.Int(2) } svc := &EC2Metadata{ @@ -80,13 +99,27 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ), } - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) + // token provider instance + tp := newTokenProvider(svc, defaultTTL) + + // NamedHandler for fetching token + svc.Handlers.Sign.PushBackNamed(request.NamedHandler{ + Name: fetchTokenHandlerName, + Fn: tp.fetchTokenHandler, + }) + // NamedHandler for enabling token provider + svc.Handlers.Complete.PushBackNamed(request.NamedHandler{ + Name: enableTokenProviderHandlerName, + Fn: tp.enableTokenProviderHandler, + }) + + svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler) svc.Handlers.UnmarshalError.PushBack(unmarshalError) svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) // Disable the EC2 Metadata service if the environment variable is set. - // This shortcirctes the service's functionality to always fail to send + // This short-circuits the service's functionality to always fail to send // requests. if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { svc.Handlers.Send.SwapNamed(request.NamedHandler{ @@ -107,7 +140,6 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio for _, option := range opts { option(svc.Client) } - return svc } @@ -119,30 +151,74 @@ type metadataOutput struct { Content string } -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) - return - } +type tokenOutput struct { + Token string + TTL time.Duration +} - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } +// unmarshal token handler is used to parse the response of a getToken operation +var unmarshalTokenHandler = request.NamedHandler{ + Name: unmarshalTokenHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + v := r.HTTPResponse.Header.Get(ttlHeader) + data, ok := r.Data.(*tokenOutput) + if !ok { + return + } + + data.Token = b.String() + // TTL is in seconds + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode, + "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + t := time.Duration(i) * time.Second + data.TTL = t + }, +} + +var unmarshalHandler = request.NamedHandler{ + Name: unmarshalMetadataHandlerName, + Fn: func(r *request.Request) { + defer r.HTTPResponse.Body.Close() + var b bytes.Buffer + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization, + "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID) + return + } + + if data, ok := r.Data.(*metadataOutput); ok { + data.Content = b.String() + } + }, } func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, 
r.HTTPResponse.Body); err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) + var b bytes.Buffer + + if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err), + r.HTTPResponse.StatusCode, r.RequestID) return } // Response body format is not consistent between metadata endpoints. // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) + r.Error = awserr.NewRequestFailure(awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())), + r.HTTPResponse.StatusCode, r.RequestID) } func validateEndpointHandler(r *request.Request) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go new file mode 100644 index 000000000..663372a91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -0,0 +1,92 @@ +package ec2metadata + +import ( + "net/http" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A tokenProvider struct provides access to EC2Metadata client +// and atomic instance of a token, along with configuredTTL for it. +// tokenProvider also provides an atomic flag to disable the +// fetch token operation. +// The disabled member will use 0 as false, and 1 as true. +type tokenProvider struct { + client *EC2Metadata + token atomic.Value + configuredTTL time.Duration + disabled uint32 +} + +// A ec2Token struct helps use of token in EC2 Metadata service ops +type ec2Token struct { + token string + credentials.Expiry +} + +// newTokenProvider provides a pointer to a tokenProvider instance +func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { + return &tokenProvider{client: c, configuredTTL: duration} +} + +// fetchTokenHandler fetches token for EC2Metadata service client by default. +func (t *tokenProvider) fetchTokenHandler(r *request.Request) { + + // short-circuits to insecure data flow if tokenProvider is disabled. + if v := atomic.LoadUint32(&t.disabled); v == 1 { + return + } + + if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + return + } + + output, err := t.client.getToken(t.configuredTTL) + + if err != nil { + + // change the disabled flag on token provider to true, + // when error is request timeout error. + if requestFailureError, ok := err.(awserr.RequestFailure); ok { + switch requestFailureError.StatusCode() { + case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: + atomic.StoreUint32(&t.disabled, 1) + case http.StatusBadRequest: + r.Error = requestFailureError + } + + // Check if request timed out while waiting for response + if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { + if e.Code() == request.ErrCodeRequestError { + atomic.StoreUint32(&t.disabled, 1) + } + } + } + return + } + + newToken := ec2Token{ + token: output.Token, + } + newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow) + t.token.Store(newToken) + + // Inject token header to the request. 
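Editor's note: token_provider.go above is the client side of IMDSv2: fetch a session token with a PUT, cache it until the TTL expiration window, attach it to every read, and fall back to the old insecure flow when the token endpoint answers 403/404/405 or the request times out (a 400 is surfaced as an error instead). On the wire, the handshake it automates looks roughly like this, assuming the standard 169.254.169.254 metadata endpoint; the header names come from the service.go constants above:

    package main

    import (
        "fmt"
        "io/ioutil"
        "net/http"
    )

    func main() {
        // 1. PUT a token request with the desired TTL (here 6 hours,
        //    matching the SDK's defaultTTL).
        req, err := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("x-aws-ec2-metadata-token-ttl-seconds", "21600")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        token, _ := ioutil.ReadAll(resp.Body)
        resp.Body.Close()

        // 2. Send the token on subsequent metadata reads.
        req, err = http.NewRequest("GET", "http://169.254.169.254/latest/meta-data/instance-id", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("x-aws-ec2-metadata-token", string(token))
        resp, err = http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := ioutil.ReadAll(resp.Body)
        fmt.Println(string(body))
    }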
+ if ec2Token, ok := t.token.Load().(ec2Token); ok { + r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token) + } +} + +// enableTokenProviderHandler enables the token provider +func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { + // If the error code status is 401, we enable the token provider + if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && + e.StatusCode() == http.StatusUnauthorized { + atomic.StoreUint32(&t.disabled, 0) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 87b9ff3ff..343a2106f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -83,6 +83,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol p := &ps[i] custAddEC2Metadata(p) custAddS3DualStack(p) + custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) custFixAppAutoscalingUsGov(p) @@ -100,6 +101,33 @@ func custAddS3DualStack(p *partition) { custAddDualstack(p, "s3-control") } +func custRegionalS3(p *partition) { + if p.ID != "aws" { + return + } + + service, ok := p.Services["s3"] + if !ok { + return + } + + // If global endpoint already exists no customization needed. + if _, ok := service.Endpoints["aws-global"]; ok { + return + } + + service.PartitionEndpoint = "aws-global" + service.Endpoints["us-east-1"] = endpoint{} + service.Endpoints["aws-global"] = endpoint{ + Hostname: "s3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + } + + p.Services["s3"] = service +} + func custAddDualstack(p *partition, svcName string) { s, ok := p.Services[svcName] if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f63449788..36029a3d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -169,7 +169,7 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, - "acm": service{ + "access-analyzer": service{ Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -192,6 +192,59 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "acm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "acm-pca": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -209,12 +262,42 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "api.ecr": service{ @@ -425,11 +508,7 @@ var awsPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "ap-east-1": endpoint{}, @@ -462,9 +541,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -527,6 +609,7 @@ var awsPartition = partition{ "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -560,11 +643,7 @@ var awsPartition = partition{ }, "autoscaling-plans": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "autoscaling-plans", - }, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -675,9 +754,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -864,6 +949,7 @@ var awsPartition = partition{ "codecommit": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1029,6 +1115,9 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ 
-1079,8 +1168,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1104,6 +1195,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "dataexchange": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "datapipeline": service{ Endpoints: endpoints{ @@ -1117,12 +1224,15 @@ var awsPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1151,6 +1261,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1166,6 +1277,8 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1205,7 +1318,8 @@ var awsPartition = partition{ "discovery": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "eu-central-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dms": service{ @@ -1264,6 +1378,12 @@ var awsPartition = partition{ Region: "ap-southeast-2", }, }, + "ca-central-1": endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1322,6 +1442,7 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1505,6 +1626,7 @@ var awsPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1512,9 +1634,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1692,11 +1817,16 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1707,6 +1837,7 @@ var awsPartition = 
partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -1830,8 +1961,10 @@ var awsPartition = partition{ "groundstation": service{ Endpoints: endpoints{ - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "eu-north-1": endpoint{}, + "me-south-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "guardduty": service{ @@ -2053,6 +2186,29 @@ var awsPartition = partition{ }, }, }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "iotthingsgraph": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2083,6 +2239,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2116,16 +2273,20 @@ var awsPartition = partition{ "kinesisanalytics": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -2134,11 +2295,20 @@ var awsPartition = partition{ "kinesisvideo": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2169,12 +2339,17 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2272,6 +2447,12 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "managedblockchain": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: endpoints{ @@ -2281,6 +2462,7 @@ var awsPartition = partition{ "mediaconnect": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2398,7 +2580,8 @@ var awsPartition = partition{ "mgh": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "eu-central-1": 
endpoint{}, + "us-west-2": endpoint{}, }, }, "mobileanalytics": service{ @@ -2414,9 +2597,10 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "monitoring": service{ @@ -2447,6 +2631,7 @@ var awsPartition = partition{ "mq": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2454,6 +2639,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2481,10 +2667,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mturk-requester": service{ @@ -2692,6 +2880,27 @@ var awsPartition = partition{ }, }, }, + "outposts": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2703,13 +2912,36 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "polly": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2721,6 +2953,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2803,6 +3036,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -2812,6 +3049,7 @@ var awsPartition = partition{ "ram": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2823,6 
+3061,8 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2977,6 +3217,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -2984,6 +3225,7 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3000,9 +3242,10 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "runtime.sagemaker": service{ @@ -3053,7 +3296,7 @@ var awsPartition = partition{ }, }, "s3": service{ - PartitionEndpoint: "us-east-1", + PartitionEndpoint: "aws-global", IsRegionalized: boxedTrue, Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3078,6 +3321,13 @@ var awsPartition = partition{ Hostname: "s3.ap-southeast-2.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, + "aws-global": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, @@ -3100,7 +3350,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", + Hostname: "s3.us-east-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, "us-east-2": endpoint{}, @@ -3278,6 +3528,16 @@ var awsPartition = partition{ }, }, }, + "schemas": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "sdb": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -3499,6 +3759,10 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3886,6 +4150,7 @@ var awsPartition = partition{ }, Endpoints: endpoints{ "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, @@ -3940,13 +4205,18 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "translate-fips.us-east-1.amazonaws.com", @@ -3961,6 +4231,7 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, "us-west-2-fips": endpoint{ Hostname: "translate-fips.us-west-2.amazonaws.com", @@ -4122,17 +4393,25 
@@ var awscnPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com.cn", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, + "appsync": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "athena": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -4142,6 +4421,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ @@ -4204,6 +4490,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "dax": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "directconnect": service{ Endpoints: endpoints{ @@ -4275,6 +4567,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticloadbalancing": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -4344,6 +4643,13 @@ var awscnPartition = partition{ "cn-north-1": endpoint{}, }, }, + "health": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -4423,6 +4729,17 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "neptune": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -4475,6 +4792,26 @@ var awscnPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "cn-northwest-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4524,7 +4861,8 @@ var awscnPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "streams.dynamodb": service{ @@ -4591,6 +4929,19 @@ var awscnPartition = partition{ }, }, }, + "workspaces": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, }, } @@ -4623,6 +4974,13 @@ var awsusgovPartition = partition{ }, }, Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -4671,7 +5029,8 @@ var awsusgovPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ Service: 
"application-autoscaling", }, @@ -4714,6 +5073,22 @@ var awsusgovPartition = partition{ }, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "clouddirectory": service{ Endpoints: endpoints{ @@ -4792,6 +5167,12 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "comprehendmedical": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -4808,6 +5189,7 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -4899,6 +5281,7 @@ var awsusgovPartition = partition{ "elasticfilesystem": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -5374,6 +5757,18 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "swf": service{ Endpoints: endpoints{ @@ -5393,6 +5788,7 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, @@ -5476,11 +5872,7 @@ var awsisoPartition = partition{ }, "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "us-iso-east-1": endpoint{}, @@ -5808,11 +6200,7 @@ var awsisobPartition = partition{ Services: services{ "application-autoscaling": service{ Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, }, Endpoints: endpoints{ "us-isob-east-1": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index fadff07d6..ca956e5f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -50,12 +50,28 @@ type Options struct { // STS Regional Endpoint flag helps with resolving the STS endpoint STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint } -// STSRegionalEndpoint is an enum type alias for int -// It is used internally by the core sdk as STS Regional Endpoint flag value +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. type STSRegionalEndpoint int +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + const ( // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. 
@@ -86,6 +102,55 @@ func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { } } +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that the S3 Regional Endpoint flag is + // not specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when the S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint returns the S3UsEast1RegionalEndpoint flag based +// on the input string provided in the env config or shared config by the user. +// +// `legacy` and `regional` are the only valid, case-insensitive strings for +// resolving the S3 regional endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } +} + // Set combines all of the option functions together. func (o *Options) Set(optFns ...func(*Options)) { for _, fn := range optFns { @@ -252,7 +317,7 @@ func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) ( // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. func (p Partition) Regions() map[string]Region { - rs := map[string]Region{} + rs := make(map[string]Region, len(p.p.Regions)) for id, r := range p.p.Regions { rs[id] = Region{ id: id, @@ -267,7 +332,7 @@ func (p Partition) Services() map[string]Service { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. func (p Partition) Services() map[string]Service { - ss := map[string]Service{} + ss := make(map[string]Service, len(p.p.Services)) for id := range p.p.Services { ss[id] = Service{ id: id, @@ -354,7 +419,7 @@ func (s Service) Regions() map[string]Region { // A region is the AWS region the service exists in. Whereas an Endpoint is // a URL that can be resolved to an instance of a service.
func (s Service) Endpoints() map[string]Endpoint { - es := map[string]Endpoint{} + es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ id: id, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go new file mode 100644 index 000000000..df75e899a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go @@ -0,0 +1,24 @@ +package endpoints + +var legacyGlobalRegions = map[string]map[string]struct{}{ + "sts": { + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, + }, + "s3": { + "us-east-1": {}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go deleted file mode 100644 index 261396219..000000000 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go +++ /dev/null @@ -1,19 +0,0 @@ -package endpoints - -var stsLegacyGlobalRegions = map[string]struct{}{ - "ap-northeast-1": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, - "sa-east-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {}, -} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 7b09adff6..eb2ac83c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -110,8 +110,9 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( region = s.PartitionEndpoint } - if service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint { - if _, ok := stsLegacyGlobalRegions[region]; ok { + if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || + (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { + if _, ok := legacyGlobalRegions[service][region]; ok { region = "aws-global" } } @@ -240,20 +241,6 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ merged.mergeIn(e) e = merged - hostname := e.Hostname - - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - } - - u := strings.Replace(hostname, "{service}", service, 1) - u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) - - scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) - u = fmt.Sprintf("%s://%s", scheme, u) - signingRegion := e.CredentialScope.Region if len(signingRegion) == 0 { signingRegion = region @@ -266,6 +253,20 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ signingNameDerived = true } + hostname := e.Hostname + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + region = signingRegion + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = 
strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + return ResolvedEndpoint{ URL: u, PartitionID: partitionID, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 185b07318..e819ab6c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -10,6 +10,7 @@ import ( type Handlers struct { Validate HandlerList Build HandlerList + BuildStream HandlerList Sign HandlerList Send HandlerList ValidateResponse HandlerList @@ -28,6 +29,7 @@ func (h *Handlers) Copy() Handlers { return Handlers{ Validate: h.Validate.copy(), Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), Sign: h.Sign.copy(), Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), @@ -46,6 +48,7 @@ func (h *Handlers) Clear() { h.Validate.Clear() h.Build.Clear() + h.BuildStream.Clear() h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() @@ -67,6 +70,9 @@ func (h *Handlers) IsEmpty() bool { if h.Build.Len() != 0 { return false } + if h.BuildStream.Len() != 0 { + return false + } if h.Send.Len() != 0 { return false } @@ -320,3 +326,18 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { AddToUserAgent(r, s) } } + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header[k] = []string{v} + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 52178141d..d597c6ead 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -36,6 +36,10 @@ const ( // API request that was canceled. Requests given an aws.Context may // return this error when canceled. CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" ) // A Request is the service request to be made. @@ -51,6 +55,7 @@ type Request struct { HTTPRequest *http.Request HTTPResponse *http.Response Body io.ReadSeeker + streamingBody io.ReadCloser BodyStart int64 // offset from beginning of Body that the request body starts Params interface{} Error error @@ -130,8 +135,6 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } - SanitizeHostForHeader(httpReq) - r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -295,6 +298,13 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.ResetBody() } +// SetStreamingBody sets the reader to be used for the request that will stream +// bytes to the server. The Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + // Presign returns the request's signed URL.
Error will be returned // if the signing fails. The expire parameter is only used for presigned Amazon // S3 API requests. All other AWS services will use a fixed expiration @@ -414,11 +424,17 @@ func (r *Request) Sign() error { return r.Error } + SanitizeHostForHeader(r.HTTPRequest) + r.Handlers.Sign.Run(r) return r.Error } func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + if r.safeBody != nil { r.safeBody.Close() } @@ -623,6 +639,10 @@ func getHost(r *http.Request) string { return r.Host } + if r.URL == nil { + return "" + } + return r.URL.Host } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index f093fc542..64784e16f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -17,11 +17,13 @@ import ( // does the pagination between API operations, and Paginator defines the // configuration that will be used per page request. // -// cont := true -// for p.Next() && cont { +// for p.Next() { // data := p.Page().(*s3.ListObjectsOutput) // // process the page's data +// // ... +// // break out of loop to stop fetching additional pages // } +// // return p.Err() // // See service client API operation Pages methods for examples how the SDK will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8015acc67..752ae47f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -75,7 +75,7 @@ func (d noOpRetryer) RetryRules(_ *Request) time.Duration { // retryableCodes is a collection of service response codes which are retry-able // without any further action. 
var retryableCodes = map[string]struct{}{ - "RequestError": {}, + ErrCodeRequestError: {}, "RequestTimeout": {}, ErrCodeResponseTimeout: {}, "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout @@ -83,6 +83,7 @@ var throttleCodes = map[string]struct{}{ "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API "Throttling": {}, "ThrottlingException": {}, "RequestLimitExceeded": {}, @@ -91,6 +92,7 @@ var throttleCodes = map[string]struct{}{ "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -176,8 +178,8 @@ func shouldRetryError(origErr error) bool { origErr := err.OrigErr() var shouldRetry bool if origErr != nil { - shouldRetry := shouldRetryError(origErr) - if err.Code() == "RequestError" && !shouldRetry { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { return false } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 7713ccfca..cc64e24f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -47,10 +47,10 @@ func resolveCredentials(cfg *aws.Config, } // WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but -// 'AWS_IAM_ROLE_ARN' was not set. +// 'AWS_ROLE_ARN' was not set. var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) -// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_IAM_ROLE_ARN' was set but +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but // 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 530cc3a9c..c1e0e9c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "strconv" + "strings" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" @@ -128,11 +129,25 @@ type envConfig struct { // AWS_ROLE_SESSION_NAME=session_name RoleSessionName string - // Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service + // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint + // for a service. // - // AWS_STS_REGIONAL_ENDPOINTS =sts_regional_endpoint + // AWS_STS_REGIONAL_ENDPOINTS=regional // Valid values are `regional` or `legacy`. STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the S3 Regional Endpoint flag for the SDK to resolve the + // endpoint for a service. + // + // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional + // Valid values are `regional` or `legacy`. + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to.
+ // + // AWS_S3_USE_ARN_REGION=true + S3UseARNRegion bool } var ( @@ -190,6 +205,12 @@ var ( stsRegionalEndpointKey = []string{ "AWS_STS_REGIONAL_ENDPOINTS", } + s3UsEast1RegionalEndpoint = []string{ + "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", + } + s3UseARNRegionEnvKey = []string{ + "AWS_S3_USE_ARN_REGION", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -275,14 +296,39 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + var err error // STS Regional Endpoint variable for _, k := range stsRegionalEndpointKey { if v := os.Getenv(k); len(v) != 0 { - STSRegionalEndpoint, err := endpoints.GetSTSRegionalEndpoint(v) + cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v) if err != nil { return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) } - cfg.STSRegionalEndpoint = STSRegionalEndpoint + } + } + + // S3 Regional Endpoint variable + for _, k := range s3UsEast1RegionalEndpoint { + if v := os.Getenv(k); len(v) != 0 { + cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + } + } + + var s3UseARNRegion string + setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey) + if len(s3UseARNRegion) != 0 { + switch { + case strings.EqualFold(s3UseARNRegion, "false"): + cfg.S3UseARNRegion = false + case strings.EqualFold(s3UseARNRegion, "true"): + cfg.S3UseARNRegion = true + default: + return envConfig{}, fmt.Errorf( + "invalid value for environment variable, %s=%s, need true or false", + s3UseARNRegionEnvKey[0], s3UseARNRegion) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 15fa64769..0ff499605 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -555,7 +555,20 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } // Regional Endpoint flag for STS endpoint resolving - mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg) + mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{ + userCfg.STSRegionalEndpoint, + envCfg.STSRegionalEndpoint, + sharedCfg.STSRegionalEndpoint, + endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) // Configure credentials if not already set by the user when creating the // Session. 
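// A hedged sketch, not part of the vendored patch: driving the new S3
// settings purely from the environment. The values are parsed by
// loadEnvConfig above, and the merge order shown in mergeConfigSrcs gives
// an explicit aws.Config precedence over them. Assumes the standard
// session API; the field names match those referenced in this diff.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// An invalid boolean such as AWS_S3_USE_ARN_REGION=maybe would make
	// session creation fail with the error constructed above.
	os.Setenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", "regional")
	os.Setenv("AWS_S3_USE_ARN_REGION", "true")

	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(aws.BoolValue(sess.Config.S3UseARNRegion)) // true
}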
@@ -567,23 +580,33 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.Credentials = creds } + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + return nil } -// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from -// envConfig and SharedConfig with envConfig being given precedence over SharedConfig -func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { - - cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint - - if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } } +} - if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { - cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } } - return nil } func initHandlers(s *Session) { @@ -619,15 +642,22 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi region := aws.StringValue(s.Config.Region) resolved, err := s.resolveEndpoint(service, region, s.Config) - if err != nil && s.Config.Logger != nil { - s.Config.Logger.Log(fmt.Sprintf( - "ERROR: unable to resolve endpoint for service %q, region %q, err: %v", - service, region, err)) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) } return client.Config{ Config: s.Config, Handlers: s.Handlers, + PartitionID: resolved.PartitionID, Endpoint: resolved.URL, SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, @@ -653,6 +683,11 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp // precedence. opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + // Support the condition where the service is modeled but its // endpoint metadata is not available. 
opt.ResolveUnknownService = true diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 857466896..a8ed88076 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -44,10 +44,16 @@ const ( // Additional config fields for regional or legacy endpoints stsRegionalEndpointSharedKey = `sts_regional_endpoints` + // Additional config fields for regional or legacy endpoints + s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. DefaultSharedConfigProfile = `default` + + // S3 ARN Region Usage + s3UseARNRegionKey = "s3_use_arn_region" ) // sharedConfig represents the configuration fields of the SDK config files. @@ -86,17 +92,30 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool + // CSM Options CSMEnabled *bool CSMHost string CSMPort string CSMClientID string - // Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service // - // sts_regional_endpoints = sts_regional_endpoint + // sts_regional_endpoints = regional // Valid values are `regional` or `legacy`, resolving to `RegionalSTSEndpoint` or `LegacySTSEndpoint` STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service + // + // s3_us_east_1_regional_endpoint = regional + // Valid values are `regional` or `legacy`, resolving to `RegionalS3UsEast1Endpoint` or `LegacyS3UsEast1Endpoint` + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint + + // Specifies if the S3 service should allow ARNs to direct the region + // the client's requests are sent to. + // + // s3_use_arn_region=true + S3UseARNRegion bool } type sharedConfigFile struct { @@ -259,10 +278,19 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e sre, err := endpoints.GetSTSRegionalEndpoint(v) if err != nil { return fmt.Errorf("failed to load %s from shared config, %s, %v", - stsRegionalEndpointKey, file.Filename, err) + stsRegionalEndpointSharedKey, file.Filename, err) } cfg.STSRegionalEndpoint = sre } + + if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 { + sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + s3UsEast1RegionalSharedKey, file.Filename, err) + } + cfg.S3UsEast1RegionalEndpoint = sre + } } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -288,6 +316,8 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.CSMPort, section, csmPortKey) updateString(&cfg.CSMClientID, section, csmClientIDKey) + updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey) + return nil } @@ -380,6 +410,15 @@ func updateString(dst *string, section ini.Section, key string) { *dst = section.String(key) } +// updateBool will only update the dst with the value in the section if the +// key is present in the section. +func updateBool(dst *bool, section ini.Section, key string) { + if !section.Has(key) { + return + } + *dst = section.Bool(key) +} + // updateBoolPtr will only update the dst with the value in the section if the // key is present in the section.
func updateBoolPtr(dst **bool, section ini.Section, key string) { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go index 244c86da0..07ea799fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -1,8 +1,7 @@ package v4 import ( - "net/http" - "strings" + "github.com/aws/aws-sdk-go/internal/strings" ) // validator houses a set of rule needed for validation of a @@ -61,7 +60,7 @@ type patterns []string // been found func (p patterns) IsValid(value string) bool { for _, pattern := range p { - if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + if strings.HasPrefixFold(value, pattern) { return true } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go new file mode 100644 index 000000000..f35fc860b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return aws.BackgroundContext() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go new file mode 100644 index 000000000..fed5c859c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go @@ -0,0 +1,13 @@ +// +build go1.7 + +package v4 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws" +) + +func requestContext(r *http.Request) aws.Context { + return r.Context() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go new file mode 100644 index 000000000..02cbd97e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go @@ -0,0 +1,63 @@ +package v4 + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +type credentialValueProvider interface { + Get() (credentials.Value, error) +} + +// StreamSigner implements signing of event stream encoded payloads +type StreamSigner struct { + region string + service string + + credentials credentialValueProvider + + prevSig []byte +} + +// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages +func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner { + return &StreamSigner{ + region: region, + service: service, + credentials: credentials, + prevSig: seedSignature, + } +} + +// GetSignature takes an event stream encoded headers and payload and returns a signature +func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) { + credValue, err := s.credentials.Get() + if err != nil { + return nil, err + } + + sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date) + + keyPath := buildSigningScope(s.region, s.service, date) + + stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date) + + signature := hmacSHA256(sigKey, []byte(stringToSign)) + s.prevSig = signature + + return signature, nil +} + +func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string { + return strings.Join([]string{ + 
"AWS4-HMAC-SHA256-PAYLOAD", + formatTime(date), + scope, + hex.EncodeToString(prevSig), + hex.EncodeToString(hashSHA256(headers)), + hex.EncodeToString(hashSHA256(payload)), + }, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 8104793aa..d71f7b3f4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -76,9 +76,14 @@ import ( ) const ( + authorizationHeader = "Authorization" + authHeaderSignatureElem = "Signature=" + signatureQueryKey = "X-Amz-Signature" + authHeaderPrefix = "AWS4-HMAC-SHA256" timeFormat = "20060102T150405Z" shortTimeFormat = "20060102" + awsV4Request = "aws4_request" // emptyStringSHA256 is a SHA256 of an empty string emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` @@ -87,9 +92,9 @@ const ( var ignoredHeaders = rules{ blacklist{ mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - "X-Amzn-Trace-Id": struct{}{}, + authorizationHeader: struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, }, }, } @@ -229,11 +234,9 @@ type signingCtx struct { DisableURIPathEscaping bool - credValues credentials.Value - isPresign bool - formattedTime string - formattedShortTime string - unsignedPayload bool + credValues credentials.Value + isPresign bool + unsignedPayload bool bodyDigest string signedHeaders string @@ -337,7 +340,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi } var err error - ctx.credValues, err = v4.Credentials.Get() + ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r)) if err != nil { return http.Header{}, err } @@ -532,39 +535,56 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildSignature() // depends on string to sign if ctx.isPresign { - ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature } else { parts := []string{ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, "SignedHeaders=" + ctx.signedHeaders, - "Signature=" + ctx.signature, + authHeaderSignatureElem + ctx.signature, } - ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", ")) } return nil } -func (ctx *signingCtx) buildTime() { - ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) - ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) +// GetSignedRequestSignature attempts to extract the signature of the request. +// Returning an error if the request is unsigned, or unable to extract the +// signature. 
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) { + if auth := r.Header.Get(authorizationHeader); len(auth) != 0 { + ps := strings.Split(auth, ", ") + for _, p := range ps { + if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 { + sig := p[len(authHeaderSignatureElem):] + if len(sig) == 0 { + return nil, fmt.Errorf("invalid request signature authorization header") + } + return hex.DecodeString(sig) + } + } + } + + if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 { + return hex.DecodeString(sig) + } + + return nil, fmt.Errorf("request not signed") +} + +func (ctx *signingCtx) buildTime() { if ctx.isPresign { duration := int64(ctx.ExpireTime / time.Second) - ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time)) ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) } else { - ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time)) } } func (ctx *signingCtx) buildCredentialString() { - ctx.credentialString = strings.Join([]string{ - ctx.formattedShortTime, - ctx.Region, - ctx.ServiceName, - "aws4_request", - }, "/") + ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time) if ctx.isPresign { ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) @@ -588,8 +608,7 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { var headers []string headers = append(headers, "host") for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { + if !r.IsValid(k) { continue // ignored header } if ctx.SignedHeaderVals == nil { @@ -653,19 +672,15 @@ func (ctx *signingCtx) buildCanonicalString() { func (ctx *signingCtx) buildStringToSign() { ctx.stringToSign = strings.Join([]string{ authHeaderPrefix, - ctx.formattedTime, + formatTime(ctx.Time), ctx.credentialString, - hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))), }, "\n") } func (ctx *signingCtx) buildSignature() { - secret := ctx.credValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) - region := makeHmac(date, []byte(ctx.Region)) - service := makeHmac(region, []byte(ctx.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(ctx.stringToSign)) + creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time) + signature := hmacSHA256(creds, []byte(ctx.stringToSign)) ctx.signature = hex.EncodeToString(signature) } @@ -726,13 +741,13 @@ func (ctx *signingCtx) removePresign() { ctx.Query.Del("X-Amz-SignedHeaders") } -func makeHmac(key []byte, data []byte) []byte { +func hmacSHA256(key []byte, data []byte) []byte { hash := hmac.New(sha256.New, key) hash.Write(data) return hash.Sum(nil) } -func makeSha256(data []byte) []byte { +func hashSHA256(data []byte) []byte { hash := sha256.New() hash.Write(data) return hash.Sum(nil) @@ -804,3 +819,28 @@ func stripExcessSpaces(vals []string) { vals[i] = string(buf[:m]) } } + +func buildSigningScope(region, service string, dt time.Time) string { + return strings.Join([]string{ + formatShortTime(dt), + region, + service, + awsV4Request, + }, "/") +} + +func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte { + kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt))) + kRegion := hmacSHA256(kDate, 
[]byte(region)) + kService := hmacSHA256(kRegion, []byte(service)) + signingKey := hmacSHA256(kService, []byte(awsV4Request)) + return signingKey +} + +func formatShortTime(dt time.Time) string { + return dt.UTC().Format(shortTimeFormat) +} + +func formatTime(dt time.Time) string { + return dt.UTC().Format(timeFormat) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index 455091540..d542ef01b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -2,6 +2,7 @@ package aws import ( "io" + "strings" "sync" "github.com/aws/aws-sdk-go/internal/sdkio" @@ -205,3 +206,36 @@ func (b *WriteAtBuffer) Bytes() []byte { defer b.m.Unlock() return b.buf } + +// MultiCloser is a utility to close multiple io.Closers within a single +// statement. +type MultiCloser []io.Closer + +// Close closes all of the io.Closers making up the MultiClosers. Any +// errors that occur while closing will be returned in the order they +// occur. +func (m MultiCloser) Close() error { + var errs errors + for _, c := range m { + err := c.Close() + if err != nil { + errs = append(errs, err) + } + } + if len(errs) != 0 { + return errs + } + + return nil +} + +type errors []error + +func (es errors) Error() string { + var parts []string + for _, e := range es { + parts = append(parts, e.Error()) + } + + return strings.Join(parts, "\n") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 199dbfee5..2fa107e6c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.31" +const SDKVersion = "1.29.9" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go new file mode 100644 index 000000000..876dcb3fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +package context + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case BackgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +// BackgroundCtx is the common base context. +var BackgroundCtx = new(emptyCtx) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 000000000..d008ae27c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. 
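+//
+// For example, HasPrefixFold("X-Amz-Meta-Name", "x-amz-meta-") reports true:
+// only the first len(prefix) bytes of s are compared, using strings.EqualFold.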
+func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 000000000..14ad0c589 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. 
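+//
+// Editorial note, a usage sketch (loadConfig is hypothetical): concurrent
+// callers that pass the same key share a single in-flight call and receive
+// the same values:
+//
+//	v, err, shared := g.Do("config", func() (interface{}, error) {
+//		return loadConfig()
+//	})
+//	_ = shared // true when the result was delivered to more than one caller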
+type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go index ecc7bf82f..151054971 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -101,7 +101,7 @@ func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { } headers.Set(h.Name, value) } - (*hs) = decodedHeaders(headers) + *hs = decodedHeaders(headers) return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go index 4b972b2d6..474339391 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -21,10 +21,24 @@ type Decoder struct { // NewDecoder initializes and returns a Decoder for decoding event // stream messages from the reader provided. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ r: r, } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } } // Decode attempts to decode a single message from the event stream reader. 
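Editor's note: the hunk above replaces the Decoder.UseLogger setter (removed in the next hunk) with functional options applied in NewDecoder. A minimal sketch of the new construction path (decodeAll and the buffer size are illustrative, and this package sits under private/, so it is not a supported public API):

	package main

	import (
		"bytes"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/private/protocol/eventstream"
	)

	// decodeAll drains messages from raw until the stream is exhausted.
	func decodeAll(raw []byte) {
		dec := eventstream.NewDecoder(
			bytes.NewReader(raw),
			eventstream.DecodeWithLogger(aws.NewDefaultLogger()),
		)

		scratch := make([]byte, 0, 1024) // payload buffer reused across calls
		for {
			msg, err := dec.Decode(scratch)
			if err != nil {
				return // io.EOF once no messages remain
			}
			_ = msg.Headers // inspect decoded headers/payload here
		}
	}

	func main() { decodeAll(nil) }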
@@ -40,6 +54,15 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { }() } + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. +func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { crc := crc32.New(crc32IEEETable) hashReader := io.TeeReader(reader, crc) @@ -72,12 +95,6 @@ func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { return m, nil } -// UseLogger specifies the Logger that that the decoder should use to log the -// message decode to. -func (d *Decoder) UseLogger(logger aws.Logger) { - d.logger = logger -} - func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { w := bytes.NewBuffer(nil) defer func() { logger.Log(w.String()) }() diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go index 150a60981..ffade3bc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -3,61 +3,107 @@ package eventstream import ( "bytes" "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" "hash" "hash/crc32" "io" + + "github.com/aws/aws-sdk-go/aws" ) // Encoder provides EventStream message encoding. type Encoder struct { - w io.Writer + w io.Writer + logger aws.Logger headersBuf *bytes.Buffer } // NewEncoder initializes and returns an Encoder to encode Event Stream // messages to an io.Writer. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ w: w, headersBuf: bytes.NewBuffer(nil), } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } } // Encode encodes a single EventStream message to the io.Writer the Encoder // was created with. An error is returned if writing the message fails. 
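+// (Editorial note: NewEncoder now mirrors NewDecoder's functional-option
+// construction; passing EncodeWithLogger(aws.NewDefaultLogger()) turns on
+// the hex dump written by logMessageEncode below.)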
-func (e *Encoder) Encode(msg Message) error { +func (e *Encoder) Encode(msg Message) (err error) { e.headersBuf.Reset() - err := encodeHeaders(e.headersBuf, msg.Headers) - if err != nil { + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { return err } crc := crc32.New(crc32IEEETable) - hashWriter := io.MultiWriter(e.w, crc) + hashWriter := io.MultiWriter(writer, crc) headersLen := uint32(e.headersBuf.Len()) payloadLen := uint32(len(msg.Payload)) - if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { return err } if headersLen > 0 { - if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { return err } } if payloadLen > 0 { - if _, err := hashWriter.Write(msg.Payload); err != nil { + if _, err = hashWriter.Write(msg.Payload); err != nil { return err } } msgCRC := crc.Sum32() - return binary.Write(e.w, binary.BigEndian, msgCRC) + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) } func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { @@ -86,7 +132,9 @@ func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) return nil } -func encodeHeaders(w io.Writer, headers Headers) error { +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. +func EncodeHeaders(w io.Writer, headers Headers) error { for _, h := range headers { hn := headerName{ Len: uint8(len(h.Name)), diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go index 5ea5a988b..34c2e89d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -1,6 +1,9 @@ package eventstreamapi -import "fmt" +import ( + "fmt" + "sync" +) type messageError struct { code string @@ -22,3 +25,53 @@ func (e messageError) Error() string { func (e messageError) OrigErr() error { return nil } + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. 
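+//
+// Editorial note, a monitoring sketch (done is a hypothetical shutdown
+// channel): wait for the ErrorSet channel to close, then read the value:
+//
+//	select {
+//	case <-oe.ErrorSet():
+//		return oe.Err()
+//	case <-done:
+//	}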
+func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. +func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go similarity index 77% rename from vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index 97937c8e5..bb8ea5da1 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -2,9 +2,7 @@ package eventstreamapi import ( "fmt" - "io" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" ) @@ -15,27 +13,8 @@ type Unmarshaler interface { UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error } -// EventStream headers with specific meaning to async API functionality. -const ( - MessageTypeHeader = `:message-type` // Identifies type of message. - EventMessageType = `event` - ErrorMessageType = `error` - ExceptionMessageType = `exception` - - // Message Events - EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". - - // Message Error - ErrorCodeHeader = `:error-code` - ErrorMessageHeader = `:error-message` - - // Message Exception - ExceptionTypeHeader = `:exception-type` -) - // EventReader provides reading from the EventStream of an reader. type EventReader struct { - reader io.ReadCloser decoder *eventstream.Decoder unmarshalerForEventType func(string) (Unmarshaler, error) @@ -47,27 +26,18 @@ type EventReader struct { // NewEventReader returns a EventReader built from the reader and unmarshaler // provided. Use ReadStream method to start reading from the EventStream. func NewEventReader( - reader io.ReadCloser, + decoder *eventstream.Decoder, payloadUnmarshaler protocol.PayloadUnmarshaler, unmarshalerForEventType func(string) (Unmarshaler, error), ) *EventReader { return &EventReader{ - reader: reader, - decoder: eventstream.NewDecoder(reader), + decoder: decoder, payloadUnmarshaler: payloadUnmarshaler, unmarshalerForEventType: unmarshalerForEventType, payloadBuf: make([]byte, 10*1024), } } -// UseLogger instructs the EventReader to use the logger and log level -// specified. -func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) { - if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) { - r.decoder.UseLogger(logger) - } -} - // ReadEvent attempts to read a message from the EventStream and return the // unmarshaled event value that the message is for. 
// @@ -95,8 +65,7 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case EventMessageType: return r.unmarshalEventMessage(msg) case ExceptionMessageType: - err = r.unmarshalEventException(msg) - return nil, err + return nil, r.unmarshalEventException(msg) case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: @@ -174,11 +143,6 @@ func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) return msgErr } -// Close closes the EventReader's EventStream reader. -func (r *EventReader) Close() error { - return r.reader.Close() -} - // GetHeaderString returns the value of the header as a string. If the header // is not set or the value is not a string an error will be returned. func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 000000000..e46b8acc2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". + + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 000000000..3a7ba5cd5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. 
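+//
+// Editorial note, a usage sketch (signEnc is hypothetical): after the final
+// Encode call, emit the end-frame and surface any signing or transport error:
+//
+//	if err := signEnc.Close(); err != nil {
+//		return err
+//	}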
+func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. +func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 000000000..433bb1630 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. 
+func (w *StreamWriter) Err() error {
+	return w.err.Err()
+}
+
+// Send writes a single event to the stream, returning an error if the write
+// fails.
+//
+// Send may be called concurrently. Events will be written to the stream
+// safely.
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error {
+	if err := w.Err(); err != nil {
+		return err
+	}
+
+	resultCh := make(chan error)
+	wrapped := eventWriteAsyncReport{
+		Event:  event,
+		Result: resultCh,
+	}
+
+	select {
+	case w.stream <- wrapped:
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-w.done:
+		return fmt.Errorf("stream closed, unable to send event")
+	}
+
+	select {
+	case err := <-resultCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-w.done:
+		return fmt.Errorf("stream closed, unable to send event")
+	}
+}
+
+func (w *StreamWriter) writeStream() {
+	defer w.Close()
+
+	for {
+		select {
+		case wrapper := <-w.stream:
+			err := w.eventWriter.WriteEvent(wrapper.Event)
+			wrapper.ReportResult(w.done, err)
+			if err != nil {
+				w.err.SetError(err)
+				return
+			}
+
+		case <-w.done:
+			if err := w.streamCloser.Close(); err != nil {
+				w.err.SetError(err)
+			}
+			return
+		}
+	}
+}
+
+type eventWriteAsyncReport struct {
+	Event  Marshaler
+	Result chan<- error
+}
+
+func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool {
+	select {
+	case e.Result <- err:
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
new file mode 100644
index 000000000..10a3823df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go
@@ -0,0 +1,63 @@
+package eventstreamapi
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Marshaler provides a marshaling interface for event types to event stream
+// messages.
+type Marshaler interface {
+	MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error)
+}
+
+// Encoder is a stream encoder that will encode an event stream message for
+// the transport.
+type Encoder interface {
+	Encode(eventstream.Message) error
+}
+
+// EventWriter provides a wrapper around the underlying event stream encoder,
+// marshaling events into messages before they are encoded.
+type EventWriter struct {
+	encoder          Encoder
+	payloadMarshaler protocol.PayloadMarshaler
+	eventTypeFor     func(Marshaler) (string, error)
+}
+
+// NewEventWriter returns a new event stream writer that will write to the
+// encoder provided. Use the WriteEvent method to write an event to the stream.
+func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error),
+) *EventWriter {
+	return &EventWriter{
+		encoder:          encoder,
+		payloadMarshaler: pm,
+		eventTypeFor:     eventTypeFor,
+	}
+}
+
+// WriteEvent writes an event to the stream. Returns an error if the event
+// fails to marshal into a message, or the underlying encoder fails to write it.
+func (w *EventWriter) WriteEvent(event Marshaler) error {
+	msg, err := w.marshal(event)
+	if err != nil {
+		return err
+	}
+
+	return w.encoder.Encode(msg)
+}
+
+func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) {
+	eventType, err := w.eventTypeFor(event)
+	if err != nil {
+		return eventstream.Message{}, err
+	}
+
+	msg, err := event.MarshalEvent(w.payloadMarshaler)
+	if err != nil {
+		return eventstream.Message{}, err
+	}
+
+	msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType))
+	return msg, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
index e3fc0766a..9f509d8f6 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go
@@ -461,6 +461,11 @@ func (v *TimestampValue) decode(r io.Reader) error {
 	return nil
 }
 
+// MarshalJSON implements the json.Marshaler interface.
+func (v TimestampValue) MarshalJSON() ([]byte, error) {
+	return []byte(v.String()), nil
+}
+
 func timeFromEpochMilli(t int64) time.Time {
 	secs := t / 1e3
 	msec := t % 1e3
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
index 2dc012a66..25c9783cd 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go
@@ -27,7 +27,7 @@ func (m *Message) rawMessage() (rawMessage, error) {
 	if len(m.Headers) > 0 {
 		var headers bytes.Buffer
-		if err := encodeHeaders(&headers, m.Headers); err != nil {
+		if err := EncodeHeaders(&headers, m.Headers); err != nil {
 			return rawMessage{}, err
 		}
 		raw.Headers = headers.Bytes()
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
index ea0da79a5..5e9499699 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"reflect"
+	"strings"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -45,10 +46,31 @@ func
UnmarshalJSON(v interface{}, stream io.Reader) error { return err } - return unmarshalAny(reflect.ValueOf(v), out, "") + return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "") } -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { +// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the +// object v. Ignores casing for structure members. +func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { vtype := value.Type() if vtype.Kind() == reflect.Ptr { vtype = vtype.Elem() // check kind of actual element type @@ -80,17 +102,17 @@ func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) if field, ok := vtype.FieldByName("_"); ok { tag = field.Tag } - return unmarshalStruct(value, data, tag) + return u.unmarshalStruct(value, data, tag) case "list": - return unmarshalList(value, data, tag) + return u.unmarshalList(value, data, tag) case "map": - return unmarshalMap(value, data, tag) + return u.unmarshalMap(value, data, tag) default: - return unmarshalScalar(value, data, tag) + return u.unmarshalScalar(value, data, tag) } } -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -114,7 +136,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa // unwrap any payloads if payload := tag.Get("payload"); payload != "" { field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) } for i := 0; i < t.NumField(); i++ { @@ -128,9 +150,19 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa if locName := field.Tag.Get("locationName"); locName != "" { name = locName } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
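+				// Editorial note: there is no break after a match, so when several
+				// keys differ only in case, the winner depends on Go's randomized
+				// map iteration order.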
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) + err := u.unmarshalAny(member, mapData[name], field.Tag) if err != nil { return err } @@ -138,7 +170,7 @@ func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTa return nil } -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -153,7 +185,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) } for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") + err := u.unmarshalAny(value.Index(i), c, "") if err != nil { return err } @@ -162,7 +194,7 @@ func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) return nil } -func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { if data == nil { return nil } @@ -179,14 +211,14 @@ func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) kvalue := reflect.ValueOf(k) vvalue := reflect.New(value.Type().Elem()).Elem() - unmarshalAny(vvalue, v, "") + u.unmarshalAny(vvalue, v, "") value.SetMapIndex(kvalue, vvalue) } return nil } -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { switch d := data.(type) { case nil: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go index e21614a12..0ea0647a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go @@ -64,7 +64,7 @@ func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error metadata.ClientInfo{}, request.Handlers{}, nil, - &request.Operation{HTTPMethod: "GET"}, + &request.Operation{HTTPMethod: "PUT"}, v, nil, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go new file mode 100644 index 000000000..9d521dcb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go @@ -0,0 +1,49 @@ +package protocol + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RequireHTTPMinProtocol request handler is used to enforce that +// the target endpoint supports the given major and minor HTTP protocol version. +type RequireHTTPMinProtocol struct { + Major, Minor int +} + +// Handler will mark the request.Request with an error if the +// target endpoint did not connect with the required HTTP protocol +// major and minor version. 
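+//
+// Editorial note, a hedged wiring sketch (the handler name is illustrative;
+// NamedHandler and the Unmarshal handler list are the SDK's own):
+//
+//	req.Handlers.Unmarshal.PushFrontNamed(request.NamedHandler{
+//		Name: "RequireMinHTTP2",
+//		Fn:   protocol.RequireHTTPMinProtocol{Major: 2}.Handler,
+//	})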
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
+	if r.Error != nil || r.HTTPResponse == nil {
+		return
+	}
+
+	if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+		return
+	}
+
+	if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+	}
+}
+
+// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
+// did not match the required HTTP major and minor protocol version.
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError"
+
+func newMinHTTPProtoError(major, minor int, r *request.Request) error {
+	return awserr.NewRequestFailure(
+		awserr.New(ErrCodeMinimumHTTPProtocolError,
+			fmt.Sprintf(
+				"operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
+				major, minor, r.HTTPResponse.Proto,
+			),
+			nil,
+		),
+		r.HTTPResponse.StatusCode, r.RequestID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
index 0cb99eb57..d40346a77 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -1,7 +1,7 @@
 // Package query provides serialization of AWS query requests, and responses.
 package query
 
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go
 
 import (
 	"net/url"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
index f69c1efc9..9231e95d1 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -1,6 +1,6 @@
 package query
 
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go
 
 import (
 	"encoding/xml"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 74e361e07..92f8b4d9a 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -15,6 +15,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/request"
+	awsStrings "github.com/aws/aws-sdk-go/internal/strings"
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
@@ -28,7 +29,9 @@ var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta
 func Unmarshal(r *request.Request) {
 	if r.DataFilled() {
 		v := reflect.Indirect(reflect.ValueOf(r.Data))
-		unmarshalBody(r, v)
+		if err := unmarshalBody(r, v); err != nil {
+			r.Error = err
+		}
 	}
 }
 
@@ -40,12 +43,21 @@ func UnmarshalMeta(r *request.Request) {
 		r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
 	}
 	if r.DataFilled() {
-		v := reflect.Indirect(reflect.ValueOf(r.Data))
-		unmarshalLocationElements(r, v)
+		if err := UnmarshalResponse(r.HTTPResponse, r.Data,
aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } } } -func unmarshalBody(r *request.Request, v reflect.Value) { +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. +func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) @@ -57,35 +69,38 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + payload.Set(reflect.ValueOf(b)) + case *string: defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + default: switch payload.Type().String() { case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, + return awserr.New(request.ErrCodeSerialization, "failed to read response body", err) - return } payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New(request.ErrCodeSerialization, + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -94,9 +109,11 @@ func unmarshalBody(r *request.Request, v reflect.Value) { } } } + + return nil } -func unmarshalLocationElements(r *request.Request, v reflect.Value) { +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -111,26 +128,25 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { switch field.Tag.Get("location") { case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + unmarshalStatusCode(m, resp.StatusCode) + case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) - break + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } + case "headers": prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + err := unmarshalHeaderMap(m, 
resp.Header, prefix, lowerCaseHeaderMaps)
 			if err != nil {
-				r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
-				break
+				return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
 			}
 		}
-		if r.Error != nil {
-			return
-		}
 	}
+
+	return nil
 }
 
 func unmarshalStatusCode(v reflect.Value, statusCode int) {
@@ -145,7 +161,7 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) {
 	}
 }
 
-func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
 	if len(headers) == 0 {
 		return nil
 	}
@@ -153,8 +169,12 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
 	case map[string]*string: // we only support string map value types
 		out := map[string]*string{}
 		for k, v := range headers {
-			k = http.CanonicalHeaderKey(k)
-			if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+			if awsStrings.HasPrefixFold(k, prefix) {
+				if normalize {
+					k = strings.ToLower(k)
+				} else {
+					k = http.CanonicalHeaderKey(k)
+				}
 				out[k[len(prefix):]] = &v[0]
 			}
 		}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
index 07a6187ea..b1ae36487 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -2,8 +2,8 @@
 // requests and responses.
 package restxml
 
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
-//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
 
 import (
 	"bytes"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
index da1a68111..f614ef898 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -19,3 +19,9 @@ func UnmarshalDiscardBody(r *request.Request) {
 	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
 	r.HTTPResponse.Body.Close()
 }
+
+// ResponseMetadata provides the SDK response metadata attributes.
+type ResponseMetadata struct {
+	StatusCode int
+	RequestID  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
new file mode 100644
index 000000000..cc857f136
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
@@ -0,0 +1,65 @@
+package protocol
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalErrorHandler provides unmarshaling of API response errors for
+// both typed and untyped errors.
+type UnmarshalErrorHandler struct {
+	unmarshaler ErrorUnmarshaler
+}
+
+// ErrorUnmarshaler is an abstract interface for concrete implementations to
+// unmarshal protocol specific response errors.
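+//
+// Editorial note: the first return value is the unmarshaled API error to
+// surface to the caller; the second reports a failure to decode the response
+// at all, which UnmarshalError below wraps as an ErrCodeSerialization request
+// failure.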
+type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized for the set of exception names to the error unmarshalers +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// errors exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 04a928640..52e87308f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -7,7 +7,6 @@ import ( "fmt" "io" "sync" - "sync/atomic" "time" "github.com/aws/aws-sdk-go/aws" @@ -20,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) const opAbortMultipartUpload = "AbortMultipartUpload" @@ -66,11 +66,31 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// Aborts a multipart upload. +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. +// the part storage, you should call the ListParts operation and ensure that +// the parts list is empty. +// +// For information about permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). 
+//
+// The following operations are related to AbortMultipartUpload:
+//
+//    * CreateMultipartUpload
+//
+//    * UploadPart
+//
+//    * CompleteMultipartUpload
+//
+//    * ListParts
+//
+//    * ListMultipartUploads
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -151,6 +171,64 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
 //
 // Completes a multipart upload by assembling previously uploaded parts.
 //
+// You first initiate the multipart upload and then upload all parts using the
+// UploadPart operation. After successfully uploading all relevant parts of
+// an upload, you call this operation to complete the upload. Upon receiving
+// this request, Amazon S3 concatenates all the parts in ascending order by
+// part number to create a new object. In the Complete Multipart Upload request,
+// you must provide the parts list. You must ensure that the parts list is complete.
+// This operation concatenates the parts that you provide in the list. For each
+// part in the list, you must provide the part number and the ETag value, returned
+// after that part was uploaded.
+//
+// Processing of a Complete Multipart Upload request could take several minutes
+// to complete. After Amazon S3 begins processing the request, it sends an HTTP
+// response header that specifies a 200 OK response. While processing is in
+// progress, Amazon S3 periodically sends white space characters to keep the
+// connection from timing out. Because a request could fail after the initial
+// 200 OK response has been sent, it is important that you check the response
+// body to determine whether the request succeeded.
+//
+// Note that if CompleteMultipartUpload fails, applications should be prepared
+// to retry the failed requests. For more information, see Amazon S3 Error Best
+// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html).
+//
+// For more information about multipart uploads, see Uploading Objects Using
+// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
+//
+// For information about permissions required to use the multipart upload API,
+// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// CompleteMultipartUpload has the following special errors:
+//
+//    * Error code: EntityTooSmall Description: Your proposed upload is smaller
+//    than the minimum allowed object size. Each part must be at least 5 MB
+//    in size, except the last part. 400 Bad Request
+//
+//    * Error code: InvalidPart Description: One or more of the specified parts
+//    could not be found. The part might not have been uploaded, or the specified
+//    entity tag might not have matched the part's entity tag. 400 Bad Request
+//
+//    * Error code: InvalidPartOrder Description: The list of parts was not
+//    in ascending order. The parts list must be specified in order by part
+//    number. 400 Bad Request
+//
+//    * Error code: NoSuchUpload Description: The specified multipart upload
+//    does not exist. The upload ID might be invalid, or the multipart upload
+//    might have been aborted or completed. 404 Not Found
+//
+// The following operations are related to CompleteMultipartUpload:
+//
+//    * CreateMultipartUpload
+//
+//    * UploadPart
+//
+//    * AbortMultipartUpload
+//
+//    * ListParts
+//
+//    * ListMultipartUploads
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -225,6 +303,194 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
 //
 // Creates a copy of an object that is already stored in Amazon S3.
 //
+// You can store individual objects of up to 5 TB in Amazon S3. You create a
+// copy of your object up to 5 GB in size in a single atomic operation using
+// this API. However, for copying an object greater than 5 GB, you must use
+// the multipart upload Upload Part - Copy API. For more information, see Copy
+// Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
+//
+// When copying an object, you can preserve all metadata (default) or specify
+// new metadata. However, the ACL is not preserved and is set to private for
+// the user making the request. To override the default ACL setting, specify
+// a new ACL when generating a copy request. For more information, see Using
+// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+//
+// Amazon S3 transfer acceleration does not support cross-region copies. If
+// you request a cross-region copy using a transfer acceleration endpoint, you
+// get a 400 Bad Request error. For more information about transfer acceleration,
+// see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html).
+//
+// All copy requests must be authenticated. Additionally, you must have read
+// access to the source object and write access to the destination bucket. For
+// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+// Both the Region that you want to copy the object from and the Region that
+// you want to copy the object to must be enabled for your account.
+//
+// To only copy an object under certain conditions, such as whether the Etag
+// matches or whether the object was modified before or after a specified date,
+// use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match,
+// x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since.
+//
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed.
+//
+// You can use this operation to change the storage class of an object that
+// is already stored in Amazon S3 using the StorageClass parameter. For more
+// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+//
+// The source object that you are copying can be encrypted or unencrypted. If
+// the source object is encrypted, it can be encrypted by server-side encryption
+// using AWS managed encryption keys or by using a customer-provided encryption
+// key. When copying an object, you can request that Amazon S3 encrypt the target
+// object by using either the AWS managed encryption keys or by using your own
+// encryption key. You can do this regardless of the form of server-side encryption
+// that was used to encrypt the source, or even if the source object was not
+// encrypted.
+// For more information about server-side encryption, see Using Server-Side
+// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// A copy request might return an error when Amazon S3 receives the copy request
+// or while Amazon S3 is copying the files. If the error occurs before the copy
+// operation starts, you receive a standard Amazon S3 error. If the error occurs
+// during the copy operation, the error response is embedded in the 200 OK response.
+// This means that a 200 OK response can contain either a success or an error.
+// Design your application to parse the contents of the response and handle
+// it appropriately.
+//
+// If the copy is successful, you receive a response with information about
+// the copied object.
+//
+// If the request is an HTTP 1.1 request, the response is chunk encoded. If
+// it were not, it would not contain the content-length, and you would need
+// to read the entire body.
+//
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+// headers are present in the request and evaluate as follows, Amazon S3
+// returns 200 OK and copies the data: x-amz-copy-source-if-match condition
+// evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates
+// to false
+//
+// * Consideration 2 – If both of the x-amz-copy-source-if-none-match and
+// x-amz-copy-source-if-modified-since headers are present in the request
+// and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
+// response code: x-amz-copy-source-if-none-match condition evaluates to
+// false x-amz-copy-source-if-modified-since condition evaluates to true
+//
+// The copy request charge is based on the storage class and Region you specify
+// for the destination object. For pricing information, see Amazon S3 Pricing
+// (https://aws.amazon.com/s3/pricing/).
+//
+// Following are other considerations when using CopyObject:
+//
+// Versioning
+//
+// By default, x-amz-copy-source identifies the current version of an object
+// to copy. (If the current version is a delete marker, Amazon S3 behaves as
+// if the object was deleted.) To copy a different version, use the versionId
+// subresource.
+//
+// If you enable versioning on the target bucket, Amazon S3 generates a unique
+// version ID for the object being copied. This version ID is different from
+// the version ID of the source object. Amazon S3 returns the version ID of
+// the copied object in the x-amz-version-id response header in the response.
+//
+// If you do not enable versioning or suspend it on the target bucket, the version
+// ID that Amazon S3 generates is always null.
+//
+// If the source object's storage class is GLACIER, you must restore a copy
+// of this object before you can use it as a source object for the copy operation.
+// For more information, see RestoreObject.
+//
+// Access Permissions
+//
+// When copying an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side-Encryption-Specific Request Headers
+//
+// To encrypt the target object, you must provide the appropriate encryption-related
+// request headers. The one you use depends on whether you want to use AWS managed
+// encryption keys or provide your own encryption key.
+//
+// * To encrypt the target object using server-side encryption with an AWS
+// managed encryption key, provide the following request headers, as appropriate.
+// x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
+// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms,
+// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon
+// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want
+// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id
+// of the symmetric customer managed CMK. Amazon S3 only supports symmetric
+// CMKs and not asymmetric CMKs. For more information, see Using Symmetric
+// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+// in the AWS Key Management Service Developer Guide. All GET and PUT requests
+// for an object protected by AWS KMS fail if you don't make them with SSL
+// or by using SigV4. For more information about server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side
+// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// * To encrypt the target object using server-side encryption with an encryption
+// key that you provide, use the following headers. x-amz-server-side-encryption-customer-algorithm
+// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5
+//
+// * If the source object is encrypted using server-side encryption with
+// customer-provided encryption keys, you must use the following headers.
+// x-amz-copy-source-server-side-encryption-customer-algorithm x-amz-copy-source-server-side-encryption-customer-key
+// x-amz-copy-source-server-side-encryption-customer-key-MD5 For
+// more information about server-side encryption with CMKs stored in AWS
+// KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs
+// stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// Access-Control-List (ACL)-Specific Request Headers
+//
+// You also can use the following access control–related headers with this
+// operation. By default, all objects are private. Only the owner has full access
+// control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the access control list (ACL) on the object. For more information,
+// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants the AWS +// accounts identified by email addresses permissions to read object data +// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// The following operations are related to CopyObject: +// +// * PutObject +// +// * GetObject +// +// For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -235,7 +501,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" // The source object of the COPY operation is not in the active tier and is -// only stored in Amazon Glacier. +// only stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -303,7 +569,60 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // CreateBucket API operation for Amazon Simple Storage Service. // -// Creates a new bucket. +// Creates a new bucket. To create a bucket, you must register with Amazon S3 +// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests +// are never allowed to create buckets. By creating the bucket, you become the +// bucket owner. +// +// Not every string is an acceptable bucket name. For information on bucket +// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// +// By default, the bucket is created in the US East (N. Virginia) Region. You +// can optionally specify a Region in the request body. You might choose a Region +// to optimize latency, minimize costs, or address regulatory requirements. +// For example, if you reside in Europe, you will probably find it advantageous +// to create buckets in the EU (Ireland) Region. 
For more information, see How +// to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// +// If you send your create bucket request to the s3.amazonaws.com endpoint, +// the request goes to the us-east-1 Region. Accordingly, the signature calculations +// in Signature Version 4 must use us-east-1 as the Region, even if the location +// constraint in the request specifies another Region where the bucket is to +// be created. If you create a bucket in a Region other than US East (N. Virginia), +// your application must be able to handle 307 redirect. For more information, +// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// +// When creating a bucket using this operation, you can optionally specify the +// accounts or groups that should be granted specific permissions on the bucket. +// There are two ways to grant the appropriate permissions using the request +// headers. +// +// * Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. For more information, see +// Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, +// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control +// headers. These headers map to the set of permissions Amazon S3 supports +// in an ACL. For more information, see Access Control List (ACL) Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants the AWS +// accounts identified by email addresses permissions to read object data +// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// The following operations are related to CreateBucket: +// +// * PutObject +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -318,6 +637,11 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // by all users of the system. Please select a different name and try again. // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all AWS Regions except in the North Virginia Region. +// For legacy compatibility, if you re-create an existing bucket that you already +// own in the North Virginia Region, Amazon S3 returns 200 OK and resets the +// bucket access control lists (ACLs). 
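+//
+// As a brief, illustrative sketch (an editor's addition, not part of the official
+// SDK documentation): creating a bucket outside us-east-1 uses the location
+// constraint discussed above. The bucket name and Region are placeholders, and
+// the usual aws, session, and s3 imports from this SDK are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("example-bucket"), // placeholder name
+//        // Omit CreateBucketConfiguration to create the bucket in us-east-1.
+//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//            LocationConstraint: aws.String("eu-west-1"), // placeholder Region
+//        },
+//    })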
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
@@ -385,13 +709,147 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
// CreateMultipartUpload API operation for Amazon Simple Storage Service.
//
-// Initiates a multipart upload and returns an upload ID.
+// This operation initiates a multipart upload and returns an upload ID. This
+// upload ID is used to associate all of the parts in the specific multipart
+// upload. You specify this upload ID in each of your subsequent upload part
+// requests (see UploadPart). You also include this upload ID in the final request
+// to either complete or abort the multipart upload request.
//
-// Note: After you initiate multipart upload and upload one or more parts, you
-// must either complete or abort multipart upload in order to stop getting charged
-// for storage of the uploaded parts. Only after you either complete or abort
-// multipart upload, Amazon S3 frees up the parts storage and stops charging
-// you for the parts storage.
+// For more information about multipart uploads, see Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html).
+//
+// If you have configured a lifecycle rule to abort incomplete multipart uploads,
+// the upload must complete within the number of days specified in the bucket
+// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
+// eligible for an abort operation and Amazon S3 aborts the multipart upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config).
+//
+// For information about the permissions required to use the multipart upload
+// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
+//
+// For request signing, multipart upload is just a series of regular requests.
+// You initiate a multipart upload, send one or more requests to upload parts,
+// and then complete the multipart upload process. You sign each request individually.
+// There is nothing special about signing multipart upload requests. For more
+// information about signing, see Authenticating Requests (AWS Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
+//
+// After you initiate a multipart upload and upload one or more parts, to stop
+// being charged for storing the uploaded parts, you must either complete or
+// abort the multipart upload. Amazon S3 frees up the space used to store the
+// parts and stops charging you for storing them only after you either complete
+// or abort a multipart upload.
+//
+// You can optionally request server-side encryption. For server-side encryption,
+// Amazon S3 encrypts your data as it writes it to disks in its data centers
+// and decrypts it when you access it. You can provide your own encryption key,
+// or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
+// Amazon S3-managed encryption keys. If you choose to provide your own encryption
+// key, the request headers you provide in UploadPart and UploadPartCopy requests
+// must match the headers you used in the request to initiate the upload by
+// using CreateMultipartUpload.
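+//
+// As a minimal, illustrative sketch (an editor's addition, not part of the
+// official SDK documentation), the flow described above might look like the
+// following. The bucket name, key, and data variable are placeholders, and the
+// usual bytes, log, aws, session, and s3 imports from this SDK are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//
+//    // Initiate the upload and keep the returned upload ID.
+//    mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("example-bucket"), // placeholder
+//        Key:    aws.String("example-key"),    // placeholder
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//
+//    // Upload each part, quoting the upload ID; every part except the last
+//    // must be at least 5 MB.
+//    part, err := svc.UploadPart(&s3.UploadPartInput{
+//        Bucket:     aws.String("example-bucket"),
+//        Key:        aws.String("example-key"),
+//        UploadId:   mpu.UploadId,
+//        PartNumber: aws.Int64(1),
+//        Body:       bytes.NewReader(data), // data is a placeholder []byte
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//
+//    // Complete the upload by listing the parts in ascending order with
+//    // the ETag values returned for each uploaded part.
+//    _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-key"),
+//        UploadId: mpu.UploadId,
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//        },
+//    })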
+//
+// To perform a multipart upload with encryption using an AWS KMS CMK, the requester
+// must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*,
+// and kms:DescribeKey actions on the key. These permissions are required because
+// Amazon S3 must decrypt and read data from the encrypted file parts before
+// it completes the multipart upload.
+//
+// If your AWS Identity and Access Management (IAM) user or role is in the same
+// AWS account as the AWS KMS CMK, then you must have these permissions on the
+// key policy. If your IAM user or role belongs to a different account than
+// the key, then you must have the permissions on both the key policy and your
+// IAM user or role.
+//
+// For more information, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html).
+//
+// Access Permissions
+//
+// When uploading an object, you can optionally specify the accounts or groups
+// that should be granted specific permissions on the new object. There are
+// two ways to grant the permissions using the request headers:
+//
+// * Specify a canned ACL with the x-amz-acl request header. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters
+// map to the set of permissions that Amazon S3 supports in an ACL. For more
+// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Server-Side-Encryption-Specific Request Headers
+//
+// You can optionally tell Amazon S3 to encrypt data at rest using server-side
+// encryption. Server-side encryption is for data encryption at rest. Amazon
+// S3 encrypts your data as it writes it to disks in its data centers and decrypts
+// it when you access it. The option you use depends on whether you want to
+// use AWS managed encryption keys or provide your own encryption key.
+//
+// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs)
+// stored in AWS Key Management Service (AWS KMS) – If you want AWS to
+// manage the keys used to encrypt data, specify the following headers in
+// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
+// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms,
+// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon
+// S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and
+// PUT requests for an object protected by AWS KMS fail if you don't make
+// them with SSL or by using SigV4. For more information about server-side
+// encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting Data
+// Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// * Use customer-provided encryption keys – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key
+// x-amz-server-side-encryption-customer-key-MD5 For more information
+// about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see
+// Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html).
+//
+// Access-Control-List (ACL)-Specific Request Headers
+//
+// You also can use the following access control–related headers with this
+// operation. By default, all objects are private. Only the owner has full access
+// control. When adding a new object, you can grant permissions to individual
+// AWS accounts or to predefined groups defined by Amazon S3. These permissions
+// are then added to the access control list (ACL) on the object. For more information,
+// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+// With this operation, you can grant access permissions using one of the following
+// two methods:
+//
+// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined
+// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees
+// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// * Specify access permissions explicitly — To explicitly grant access
+// permissions to specific AWS accounts or groups, use the following headers.
+// Each header maps to specific permissions that Amazon S3 supports in an
+// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// In the header, you specify a list of grantees who get the specific permission.
+// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write
+// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You
+// specify each grantee as a type=value pair, where the type is one of the
+// following: emailAddress – if the value specified is the email address
+// of an AWS account id – if the value specified is the canonical user
+// ID of an AWS account uri – if you are granting permissions to a predefined
+// group For example, the following x-amz-grant-read header grants the AWS
+// accounts identified by email addresses permissions to read object data
+// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// The following operations are related to CreateMultipartUpload:
+//
+// * UploadPart
+//
+// * CompleteMultipartUpload
+//
+// * AbortMultipartUpload
+//
+// * ListParts
+//
+// * ListMultipartUploads
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -466,8 +924,14 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
// DeleteBucket API operation for Amazon Simple Storage Service.
//
-// Deletes the bucket. All objects (including all object versions and Delete
-// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+// Deletes the bucket. All objects (including all object versions and delete
+// markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Related Resources
+//
+// *
+//
+// *
//
// Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -547,7 +1011,20 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner -// can grant this permission to others. +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -622,7 +1099,20 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // DeleteBucketCors API operation for Amazon Simple Storage Service. // -// Deletes the CORS configuration information set for the bucket. +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources: +// +// * +// +// * RESTOPTIONSobject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -697,7 +1187,23 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// Deletes the server-side encryption configuration from the bucket. +// This implementation of the DELETE operation removes default encryption from +// the bucket. For information about the Amazon S3 default encryption feature, +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev//bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. 
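+//
+// As a short, illustrative sketch (an editor's addition, not from the SDK
+// documentation), deleting the default encryption configuration and inspecting
+// a service error via the runtime type assertion mentioned below; the bucket
+// name is a placeholder and the usual fmt, aws, awserr, session, and s3 imports
+// from this SDK are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.DeleteBucketEncryption(&s3.DeleteBucketEncryptionInput{
+//        Bucket: aws.String("example-bucket"), // placeholder
+//    })
+//    if aerr, ok := err.(awserr.Error); ok {
+//        fmt.Println(aerr.Code(), aerr.Message()) // e.g. "NoSuchBucket"
+//    }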
+// +// Related Resources +// +// * PutBucketEncryption +// +// * GetBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -775,6 +1281,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // Deletes an inventory configuration (identified by the inventory ID) from // the bucket. // +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// * GetBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -848,7 +1371,27 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // DeleteBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deletes the lifecycle configuration from the bucket. +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -923,8 +1466,28 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // -// Deletes a metrics configuration (specified by the metrics configuration ID) -// from the bucket. +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. 
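+//
+// A minimal, illustrative sketch (an editor's addition, not from the SDK
+// documentation); the bucket name and metrics configuration ID are placeholders,
+// and the usual aws, session, and s3 imports from this SDK are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    _, err := svc.DeleteBucketMetricsConfiguration(&s3.DeleteBucketMetricsConfigurationInput{
+//        Bucket: aws.String("example-bucket"),     // placeholder
+//        Id:     aws.String("example-metrics-id"), // the metrics configuration ID
+//    })
+//    if err != nil {
+//        // handle the error
+//    }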
+// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// * GetBucketMetricsConfiguration +// +// * PutBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -999,7 +1562,29 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// Deletes the policy from the bucket. +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the DeleteBucketPolicy permissions on the specified bucket and +// belong to the bucket owner's account to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to DeleteBucketPolicy +// +// * CreateBucket +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1074,10 +1659,26 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // DeleteBucketReplication API operation for Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket. For information about -// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant it +// to others. 
For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// The following operations are related to DeleteBucketReplication: +// +// * PutBucketReplication +// +// * GetBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1153,6 +1754,16 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // Deletes the tags from the bucket. // +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// * GetBucketTagging +// +// * PutBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1226,7 +1837,26 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration from the bucket. +// This operation removes the website configuration for a bucket. Amazon S3 +// returns a 200 OK response upon successfully deleting a website configuration +// on the specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE operation requires the S3:DeleteBucketWebsite permission. By +// default, only the bucket owner can delete the website configuration attached +// to a bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// * GetBucketWebsite +// +// * PutBucketWebsite // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1304,6 +1934,29 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // marker, which becomes the latest version of the object. If there isn't a // null version, Amazon S3 does not remove any objects. // +// To remove a specific version, you must be the bucket owner and you must use +// the version Id subresource. Using this subresource permanently deletes the +// version. 
If the object deleted is a delete marker, Amazon S3 sets the response
+// header, x-amz-delete-marker, to true.
+//
+// If the object you want to delete is in a bucket where the bucket versioning
+// configuration is MFA Delete enabled, you must include the x-amz-mfa request
+// header in the DELETE versionId request. Requests that include x-amz-mfa must
+// use HTTPS.
+//
+// For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html).
+// To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete).
+//
+// You can delete objects by explicitly calling the DELETE Object API or by
+// configuring its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove
+// them for you. If you want to block users or accounts from removing or deleting
+// objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion,
+// and s3:PutLifeCycleConfiguration actions.
+//
+// The following operation is related to DeleteObject:
+//
+// * PutObject
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -1376,7 +2029,21 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
// DeleteObjectTagging API operation for Amazon Simple Storage Service.
//
-// Removes the tag-set from an existing object.
+// Removes the entire tag set from the specified object. For more information
+// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html).
+//
+// To use this operation, you must have permission to perform the s3:DeleteObjectTagging
+// action.
+//
+// To delete tags of a specific object version, add the versionId query parameter
+// in the request. You will need permission for the s3:DeleteObjectVersionTagging
+// action.
+//
+// The following operations are related to DeleteObjectTagging:
+//
+// * PutObjectTagging
+//
+// * GetObjectTagging
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1451,7 +2118,47 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
// DeleteObjects API operation for Amazon Simple Storage Service.
//
// This operation enables you to delete multiple objects from a bucket using
-// a single HTTP request. You may specify up to 1000 keys.
+// a single HTTP request. If you know the object keys that you want to delete,
+// then this operation provides a suitable alternative to sending individual
+// delete requests, reducing per-request overhead.
+//
+// The request contains a list of up to 1000 keys that you want to delete. In
+// the XML, you provide the object key names, and optionally, version IDs if
+// you want to delete a specific version of the object from a versioning-enabled
+// bucket. For each key, Amazon S3 performs a delete operation and returns the
+// result of that delete, success, or failure, in the response. Note that if
+// the object specified in the request is not found, Amazon S3 returns the result
+// as deleted.
+//
+// The operation supports two modes for the response: verbose and quiet.
+// By default, the operation uses verbose mode in which the response includes
+// the result of deletion of each key in your request. In quiet mode the response
+// includes only keys where the delete operation encountered an error. For a
+// successful deletion, the operation does not return any information about
+// the delete in the response body.
+//
+// When performing this operation on an MFA Delete enabled bucket with a request
+// that attempts to delete any versioned objects, you must include an MFA token.
+// If you do not provide one, the entire request will fail, even if there are
+// non-versioned objects you are trying to delete. If you provide an invalid
+// token, whether there are versioned keys in the request or not, the entire
+// Multi-Object Delete request will fail. For information about MFA Delete,
+// see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete).
+//
+// Finally, the Content-MD5 header is required for all Multi-Object Delete requests.
+// Amazon S3 uses the header value to ensure that your request body has not
+// been altered in transit.
+//
+// The following operations are related to DeleteObjects:
+//
+// * CreateMultipartUpload
+//
+// * UploadPart
+//
+// * CompleteMultipartUpload
+//
+// * ListParts
+//
+// * AbortMultipartUpload
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1526,7 +2233,21 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput)
// DeletePublicAccessBlock API operation for Amazon Simple Storage Service.
//
-// Removes the PublicAccessBlock configuration from an Amazon S3 bucket.
+// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
+// this operation, you must have the s3:PutBucketPublicAccessBlock permission.
+// For more information about permissions, see Permissions Related to Bucket
+// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// The following operations are related to DeletePublicAccessBlock:
+//
+// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
+//
+// * GetPublicAccessBlock
+//
+// * PutPublicAccessBlock
+//
+// * GetBucketPolicyStatus
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1600,7 +2321,32 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
//
-// Returns the accelerate configuration of a bucket.
+// This implementation of the GET operation uses the accelerate subresource
+// to return the Transfer Acceleration state of a bucket, which is either Enabled
+// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that
+// enables you to perform faster data transfers to and from Amazon S3.
+//
+// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others.
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev//transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * PutBucketAccelerateConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1674,7 +2420,15 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// Gets the access control policy for the bucket. +// This implementation of the GET operation uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission +// is granted to the anonymous user, you can return the ACL of the bucket without +// using an authorization header. +// +// Related Resources +// +// * // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1748,8 +2502,27 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// Gets an analytics configuration for the bucket (specified by the analytics -// configuration ID). +// This implementation of the GET operation returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * +// +// * +// +// * // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1823,7 +2596,20 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the CORS configuration for the bucket. +// Returns the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// For more information about cors, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// * PutBucketCors +// +// * DeleteBucketCors // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1897,7 +2683,21 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the server-side encryption configuration of a bucket. +// Returns the default encryption configuration for an Amazon S3 bucket. For +// information about the Amazon S3 default encryption feature, see Amazon S3 +// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// * PutBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1971,8 +2771,25 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Returns an inventory configuration (identified by the inventory ID) from -// the bucket. +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
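+//
+// A minimal, illustrative sketch (an editor's addition, not from the SDK
+// documentation); the bucket name and inventory configuration ID are
+// placeholders, and the usual fmt, aws, session, and s3 imports are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    out, err := svc.GetBucketInventoryConfiguration(&s3.GetBucketInventoryConfigurationInput{
+//        Bucket: aws.String("example-bucket"),    // placeholder
+//        Id:     aws.String("example-inventory"), // the inventory configuration ID
+//    })
+//    if err == nil {
+//        fmt.Println(out.InventoryConfiguration)
+//    }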
+//
+// The following operations are related to GetBucketInventoryConfiguration:
+//
+// * DeleteBucketInventoryConfiguration
+//
+// * ListBucketInventoryConfigurations
+//
+// * PutBucketInventoryConfiguration
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2051,7 +2868,34 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
// GetBucketLifecycle API operation for Amazon Simple Storage Service.
//
-// No longer used, see the GetBucketLifecycleConfiguration operation.
+//
+// For an updated version of this API, see GetBucketLifecycleConfiguration.
+// If you configured a bucket lifecycle using the filter element, you should
+// see the updated version of this topic. This topic is provided for backward
+// compatibility.
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// GetBucketLifecycle has the following special error:
+//
+// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycle:
+//
+// * GetBucketLifecycleConfiguration
+//
+// * PutBucketLifecycle
+//
+// * DeleteBucketLifecycle
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2129,7 +2973,37 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
//
-// Returns the lifecycle configuration information set on the bucket.
+//
+// Bucket lifecycle configuration now supports specifying a lifecycle rule using
+// an object key name prefix, one or more object tags, or a combination of both.
+// Accordingly, this section describes the latest API. The response describes
+// the new filter element that you can use to specify a filter to select a subset
+// of objects to which the rule applies. If you are still using a previous version
+// of the lifecycle configuration, it still works. For the earlier API description,
+// see GetBucketLifecycle.
+//
+// Returns the lifecycle configuration information set on the bucket. For information
+// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html).
+//
+// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others.
+// For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// GetBucketLifecycleConfiguration has the following special error:
+//
+// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle
+// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault
+// Code Prefix: Client
+//
+// The following operations are related to GetBucketLifecycleConfiguration:
+//
+// * GetBucketLifecycle
+//
+// * PutBucketLifecycle
+//
+// * DeleteBucketLifecycle
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2203,7 +3077,17 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
// GetBucketLocation API operation for Amazon Simple Storage Service.
//
-// Returns the region the bucket resides in.
+// Returns the Region the bucket resides in. You set the bucket's Region using
+// the LocationConstraint request parameter in a CreateBucket request. For more
+// information, see CreateBucket.
+//
+// To use this implementation of the operation, you must be the bucket owner.
+//
+// The following operations are related to GetBucketLocation:
+//
+// * GetObject
+//
+// * CreateBucket
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2280,6 +3164,12 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
// Returns the logging status of a bucket and the permissions users have to
// view and modify that status. To use GET, you must be the bucket owner.
//
+// The following operations are related to GetBucketLogging:
+//
+// * CreateBucket
+//
+// * PutBucketLogging
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
@@ -2353,7 +3243,26 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
//
// Gets a metrics configuration (specified by the metrics configuration ID)
-// from the bucket.
+// from the bucket. Note that this doesn't include the daily storage metrics.
+//
+// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about CloudWatch request metrics for Amazon S3, see Monitoring
+// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html).
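+//
+// A minimal, illustrative sketch (an editor's addition, not from the SDK
+// documentation); the bucket name and metrics configuration ID are placeholders,
+// and the usual fmt, aws, session, and s3 imports are assumed:
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//    out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
+//        Bucket: aws.String("example-bucket"),     // placeholder
+//        Id:     aws.String("example-metrics-id"), // the metrics configuration ID
+//    })
+//    if err == nil {
+//        fmt.Println(out.MetricsConfiguration)
+//    }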
+// +// The following operations are related to GetBucketMetricsConfiguration: +// +// * PutBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2432,7 +3341,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see the GetBucketNotificationConfiguration operation. +// No longer used, see GetBucketNotificationConfiguration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2512,6 +3421,22 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. // +// If notifications are not enabled on the bucket, the operation returns an +// empty NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketNotification: +// +// * PutBucketNotification +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2584,7 +3509,26 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // GetBucketPolicy API operation for Amazon Simple Storage Service. // -// Returns the policy of a specified bucket. +// Returns the policy of a specified bucket. If you are using an identity other +// than the root user of the AWS account that owns the bucket, the calling identity +// must have the GetBucketPolicy permissions on the specified bucket and belong +// to the bucket owner's account in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operation is related to GetBucketPolicy: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2659,7 +3603,22 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // GetBucketPolicyStatus API operation for Amazon Simple Storage Service. // // Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetBucketPolicyStatus: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * GetPublicAccessBlock +// +// * PutPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2739,6 +3698,25 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // to all Amazon S3 systems. Therefore, a get request soon after put or delete // can return a wrong result. // +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// This operation requires permissions for the s3:GetReplicationConfiguration +// action. For more information about permissions, see Using Bucket Policies +// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. +// +// For information about GetBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to GetBucketReplication: +// +// * PutBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2811,7 +3789,13 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // GetBucketRequestPayment API operation for Amazon Simple Storage Service. // -// Returns the request payment configuration of a bucket. +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// * ListObjects // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -2887,6 +3871,21 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
 //
 // Returns the tag set associated with the bucket.
 //
+// To use this operation, you must have permission to perform the s3:GetBucketTagging
+// action. By default, the bucket owner has this permission and can grant this
+// permission to others.
+//
+// GetBucketTagging has the following special error:
+//
+// * Error code: NoSuchTagSetError Description: There is no tag set associated
+// with the bucket.
+//
+// The following operations are related to GetBucketTagging:
+//
+// * PutBucketTagging
+//
+// * DeleteBucketTagging
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -2961,6 +3960,20 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
 //
 // Returns the versioning state of a bucket.
 //
+// To retrieve the versioning state of a bucket, you must be the bucket owner.
+//
+// This implementation also returns the MFA Delete status of the versioning
+// state. If the MFA Delete status is enabled, the bucket owner must use an
+// authentication device to change the versioning state of the bucket.
+//
+// The following operations are related to GetBucketVersioning:
+//
+// * GetObject
+//
+// * PutObject
+//
+// * DeleteObject
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -3033,7 +4046,21 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
 // GetBucketWebsite API operation for Amazon Simple Storage Service.
 //
-// Returns the website configuration for a bucket.
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET operation requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
+//
+// The following operations are related to GetBucketWebsite:
+//
+// * DeleteBucketWebsite
+//
+// * PutBucketWebsite
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -3107,7 +4134,130 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
 // GetObject API operation for Amazon Simple Storage Service.
 //
-// Retrieves objects from Amazon S3.
+// Retrieves objects from Amazon S3. To use GET, you must have READ access to
+// the object. If you grant READ access to the anonymous user, you can return
+// the object without using an authorization header.
+//
+// An Amazon S3 bucket has no directory hierarchy such as you would find in
+// a typical computer file system. You can, however, create a logical hierarchy
+// by using object key names that imply a folder structure.
For example, instead
+// of naming an object sample.jpg, you can name it photos/2006/February/sample.jpg.
+//
+// To get an object from such a logical hierarchy, specify the full key name
+// for the object in the GET operation. For a virtual hosted-style request example,
+// if you have the object photos/2006/February/sample.jpg, specify the resource
+// as /photos/2006/February/sample.jpg. For a path-style request example, if
+// you have the object photos/2006/February/sample.jpg in the bucket named examplebucket,
+// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For
+// more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
+//
+// To distribute large files to many people, you can save bandwidth costs by
+// using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html).
+// For more information about returning the ACL of an object, see GetObjectAcl.
+//
+// If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE
+// storage classes, before you can retrieve the object you must first restore
+// a copy using RestoreObject. Otherwise, this operation returns an InvalidObjectStateError
+// error. For information about restoring archived objects, see Restoring Archived
+// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you GET the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging
+// action), the response also returns the x-amz-tagging-count header that provides
+// the number of tags associated with the object. You can use GetObjectTagging
+// to retrieve the tag set associated with an object.
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 will
+// return an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 will return
+// an HTTP status code 403 ("access denied") error.
+//
+// Versioning
+//
+// By default, the GET operation returns the current version of an object. To
+// return a different version, use the versionId subresource.
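+//
+// As an editorial sketch (not part of the generated AWS documentation), a GET
+// that requests a specific version and overrides one response header as described
+// below; svc is an *s3.S3 client and all values shown are placeholders:
+//
+//    obj, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket:                     aws.String("examplebucket"),
+//        Key:                        aws.String("photos/2006/February/sample.jpg"),
+//        VersionId:                  aws.String("null"),
+//        ResponseContentDisposition: aws.String("attachment; filename=sample.jpg"),
+//    })
+//    if err == nil {
+//        defer obj.Body.Close()
+//        // read the object body here
+//    }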
+//
+// If the current version of the object is a delete marker, Amazon S3 behaves
+// as if the object was deleted and includes x-amz-delete-marker: true in the
+// response.
+//
+// For more information about versioning, see PutBucketVersioning.
+//
+// Overriding Response Header Values
+//
+// There are times when you want to override certain response header values
+// in a GET response. For example, you might override the Content-Disposition
+// response header value in your GET request.
+//
+// You can override values for a set of response headers using the following
+// query parameters. These response header values are sent only on a successful
+// request, that is, when status code 200 OK is returned. The set of headers
+// you can override using these parameters is a subset of the headers that Amazon
+// S3 accepts when you create an object. The response headers that you can override
+// for the GET response are Content-Type, Content-Language, Expires, Cache-Control,
+// Content-Disposition, and Content-Encoding. To override these header values
+// in the GET response, you use the following request parameters.
+//
+// You must sign the request, either using an Authorization header or a presigned
+// URL, when using these parameters. They cannot be used with an unsigned (anonymous)
+// request.
+//
+// * response-content-type
+//
+// * response-content-language
+//
+// * response-expires
+//
+// * response-cache-control
+//
+// * response-content-disposition
+//
+// * response-content-encoding
+//
+// Additional Considerations about Request Headers
+//
+// If both of the If-Match and If-Unmodified-Since headers are present in the
+// request as follows: If-Match condition evaluates to true, and If-Unmodified-Since
+// condition evaluates to false; then S3 returns 200 OK and the data requested.
+//
+// If both of the If-None-Match and If-Modified-Since headers are present in
+// the request as follows: If-None-Match condition evaluates to false, and If-Modified-Since
+// condition evaluates to true; then S3 returns the 304 Not Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// The following operations are related to GetObject:
+//
+// * ListBuckets
+//
+// * GetObjectAcl
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -3186,7 +4336,21 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
 // GetObjectAcl API operation for Amazon Simple Storage Service.
 //
-// Returns the access control list (ACL) of an object.
+// Returns the access control list (ACL) of an object. To use this operation,
+// you must have READ_ACP access to the object.
+//
+// Versioning
+//
+// By default, GET returns ACL information about the current version of an object.
+// To return ACL information about a different version, use the versionId subresource.
+//
+// The following operations are related to GetObjectAcl:
+//
+// * GetObject
+//
+// * DeleteObject
+//
+// * PutObject
 //
 // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3265,7 +4429,8 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Gets an object's current Legal Hold status. +// Gets an object's current Legal Hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3339,9 +4504,10 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration // GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Gets the object lock configuration for a bucket. The rule specified in the -// object lock configuration will be applied by default to every new object -// placed in the specified bucket. +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3415,7 +4581,8 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // GetObjectRetention API operation for Amazon Simple Storage Service. // -// Retrieves an object's retention settings. +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3489,7 +4656,25 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // GetObjectTagging API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET operation returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following operation is related to GetObjectTagging: +// +// * PutObjectTagging // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3563,7 +4748,19 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. 
// -// Return torrent files from a bucket. +// Return torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. For more information about BitTorrent, see +// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// +// You can get torrent only for objects that are less than 5 GB in size and +// that are not encrypted using server-side encryption with customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// The following operation is related to GetObjectTorrent: +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3637,7 +4834,30 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // GetPublicAccessBlock API operation for Amazon Simple Storage Service. // -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// * PutPublicAccessBlock +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3713,7 +4933,15 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // // This operation is useful to determine if a bucket exists and you have permission -// to access it. +// to access it. The operation returns a 200 OK if the bucket exists and you +// have permission to access it. Otherwise, the operation might return responses +// such as 404 Not Found and 403 Forbidden. +// +// To use this operation, you must have permissions to perform the s3:ListBucket +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -3796,6 +5024,63 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
 // object itself. This operation is useful if you're only interested in an object's
 // metadata. To use HEAD, you must have READ access to the object.
 //
+// A HEAD request has the same options as a GET operation on an object. The
+// response is identical to the GET response except that there is no response
+// body.
+//
+// If you encrypt an object by using server-side encryption with customer-provided
+// encryption keys (SSE-C) when you store the object in Amazon S3, then when
+// you retrieve the metadata from the object, you must use the following headers:
+//
+// * x-amz-server-side-encryption-customer-algorithm
+//
+// * x-amz-server-side-encryption-customer-key
+//
+// * x-amz-server-side-encryption-customer-key-MD5
+//
+// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided
+// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+//
+// Encryption request headers, like x-amz-server-side-encryption, should not
+// be sent for GET requests if your object uses server-side encryption with
+// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed
+// encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+// get an HTTP 400 BadRequest error.
+//
+// Request headers are limited to 8 KB in size. For more information, see Common
+// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html).
+//
+// Consider the following when using request headers:
+//
+// * Consideration 1 – If both of the If-Match and If-Unmodified-Since
+// headers are present in the request as follows: If-Match condition evaluates
+// to true, and If-Unmodified-Since condition evaluates to false; then Amazon
+// S3 returns 200 OK and the data requested.
+//
+// * Consideration 2 – If both of the If-None-Match and If-Modified-Since
+// headers are present in the request as follows: If-None-Match condition
+// evaluates to false, and If-Modified-Since condition evaluates to true;
+// then Amazon S3 returns the 304 Not Modified response code.
+//
+// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+//
+// Permissions
+//
+// You need the s3:GetObject permission for this operation. For more information,
+// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
+// If the object you request does not exist, the error Amazon S3 returns depends
+// on whether you also have the s3:ListBucket permission.
+//
+// * If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
+// an HTTP status code 404 ("no such key") error.
+//
+// * If you don’t have the s3:ListBucket permission, Amazon S3 returns
+// an HTTP status code 403 ("access denied") error.
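+//
+// As an editorial sketch (not part of the generated AWS documentation), checking
+// an object's metadata with this operation; svc is an *s3.S3 client and the
+// bucket and key are placeholders:
+//
+//    head, err := svc.HeadObject(&s3.HeadObjectInput{
+//        Bucket: aws.String("examplebucket"),
+//        Key:    aws.String("photos/2006/February/sample.jpg"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.Int64Value(head.ContentLength), aws.StringValue(head.ContentType))
+//    }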
+//
+// The following operation is related to HeadObject:
+//
+// * GetObject
+//
 // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
 // for more information on returned errors.
 //
@@ -3871,7 +5156,33 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
 // ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
 //
-// Lists the analytics configurations for the bucket.
+// Lists the analytics configurations for the bucket. You can have up to 1,000
+// analytics configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. You should always check the IsTruncated element
+// in the response. If there are no more configurations to list, IsTruncated
+// is set to false. If there are more configurations to list, IsTruncated is
+// set to true, and there will be a value in NextContinuationToken. You use
+// the NextContinuationToken value to continue the pagination of the list by
+// passing the value in continuation-token in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+//
+// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics
+// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// The following operations are related to ListBucketAnalyticsConfigurations:
+//
+// * GetBucketAnalyticsConfiguration
+//
+// * DeleteBucketAnalyticsConfiguration
+//
+// * PutBucketAnalyticsConfiguration
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -3945,7 +5256,33 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
 // ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
 //
-// Returns a list of inventory configurations for the bucket.
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 inventory configurations per bucket.
+//
+// This operation supports list pagination and does not return more than 100
+// configurations at a time. Always check the IsTruncated element in the response.
+// If there are no more configurations to list, IsTruncated is set to false.
+// If there are more configurations to list, IsTruncated is set to true, and
+// there is a value in NextContinuationToken. You use the NextContinuationToken
+// value to continue the pagination of the list by passing the value in continuation-token
+// in the request to GET the next page.
+//
+// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others.
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * PutBucketInventoryConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4019,7 +5356,34 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. // -// Lists the metrics configurations for the bucket. +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This operation supports list pagination and does not return more than 100 +// configurations at a time. Always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there is a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to ListBucketMetricsConfigurations: +// +// * PutBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * DeleteBucketMetricsConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4173,7 +5537,40 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. +// This operation lists in-progress multipart uploads. 
An in-progress multipart +// upload is a multipart upload that has been initiated using the Initiate Multipart +// Upload request, but has not yet been completed or aborted. +// +// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// multipart uploads is the maximum number of uploads a response can include, +// which is also the default value. You can further limit the number of uploads +// in a response by specifying the max-uploads parameter in the response. If +// additional multipart uploads satisfy the list criteria, the response will +// contain an IsTruncated element with the value true. To list the additional +// multipart uploads, use the key-marker and upload-id-marker request parameters. +// +// In the response, the uploads are sorted by key. If your application has initiated +// more than one multipart upload using the same object key, then uploads in +// the response are first sorted by key. Additionally, uploads are sorted in +// ascending order within each key by the upload initiation time. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListMultipartUploads: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * ListParts +// +// * AbortMultipartUpload // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4246,10 +5643,12 @@ func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4303,7 +5702,24 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // -// Returns metadata about all of the versions of objects in a bucket. +// Returns metadata about all of the versions of objects in a bucket. You can +// also use request parameters as selection criteria to return metadata about +// a subset of all the object versions. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4376,10 +5792,12 @@ func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObje }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4433,9 +5851,27 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // ListObjects API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This API has been revised. We recommend that you use the newer version, ListObjectsV2, +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// * ListObjectsV2 +// +// * GetObject +// +// * PutObject +// +// * CreateBucket +// +// * ListBuckets // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4513,10 +5949,12 @@ func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInpu }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4570,10 +6008,34 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // ListObjectsV2 API operation for Amazon Simple Storage Service. // -// Returns some or all (up to 1000) of the objects in a bucket. You can use +// Returns some or all (up to 1,000) of the objects in a bucket. You can use // the request parameters as selection criteria to return a subset of the objects -// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend -// you use this revised API for new application development. +// in a bucket. A 200 OK response can contain valid or invalid XML. Make sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// To use this operation in an AWS Identity and Access Management (IAM) policy, +// you must have permissions to perform the s3:ListBucket action. The bucket +// owner has this permission by default and can grant this permission to others. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// This section describes the latest revision of the API. We recommend that +// you use this revised API for application development. 
For backward compatibility, +// Amazon S3 continues to support the prior version of this API, ListObjects. +// +// To get a list of your buckets, see ListBuckets. +// +// The following operations are related to ListObjectsV2: +// +// * GetObject +// +// * PutObject +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4651,10 +6113,12 @@ func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2 }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4709,6 +6173,33 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // ListParts API operation for Amazon Simple Storage Service. // // Lists the parts that have been uploaded for a specific multipart upload. +// This operation must include the upload ID, which you obtain by sending the +// initiate multipart upload request (see CreateMultipartUpload). This request +// returns a maximum of 1,000 uploaded parts. The default number of parts returned +// is 1,000 parts. You can restrict the number of parts returned by specifying +// the max-parts request parameter. If your multipart upload consists of more +// than 1,000 parts, the response returns an IsTruncated field with the value +// of true, and a NextPartNumberMarker element. In subsequent ListParts requests +// you can include the part-number-marker query string parameter and set its +// value to the NextPartNumberMarker field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). +// +// For information on permissions required to use the multipart upload API, +// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// +// The following operations are related to ListParts: +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4781,10 +6272,12 @@ func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, f }, } - cont := true - for p.Next() && cont { - cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } } + return p.Err() } @@ -4833,7 +6326,41 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// Sets the accelerate configuration of an existing bucket. +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// * Enabled – Enables accelerated data transfers to the bucket. +// +// * Suspended – Disables accelerated data transfers to the bucket. +// +// The GetBucketAccelerateConfiguration operation returns the transfer acceleration +// state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// * GetBucketAccelerateConfiguration +// +// * CreateBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4908,7 +6435,80 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // PutBucketAcl API operation for Amazon Simple Storage Service. // -// Sets the permissions on a bucket using access control lists (ACL). +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// * Specify the ACL in the request body +// +// * Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. 
If you
+// use these ACL-specific headers, you cannot use the x-amz-acl header to
+// set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: emailAddress – if the value specified is the email address
+// of an AWS account id – if the value specified is the canonical user
+// ID of an AWS account uri – if you are granting permissions to a predefined
+// group For example, the following x-amz-grant-write header grants create,
+// overwrite, and delete objects permission to the LogDelivery group predefined
+// by Amazon S3 and two AWS accounts identified by their email addresses.
+// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
+// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// * By Email address: <Grantee xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser.
+//
+// * By the person's ID: <Grantee xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// * By URI: <Grantee xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// Related Resources
+//
+// * CreateBucket
+//
+// * DeleteBucket
+//
+// * GetObjectAcl
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -4984,7 +6584,50 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
 // PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
 //
 // Sets an analytics configuration for the bucket (specified by the analytics
-// configuration ID).
+// configuration ID). You can have up to 1,000 analytics configurations per
+// bucket.
+//
+// You can choose to have storage class analysis export analysis reports sent
+// to a comma-separated values (CSV) flat file. See the DataExport request element.
+// Reports are updated daily and are based on the object filters that you configure.
+// When selecting data export, you specify a destination bucket and an optional
+// destination prefix where the file is written. You can export the data to
+// a destination bucket in a different account. However, the destination bucket
+// must be in the same Region as the bucket that you are making the PUT analytics
+// configuration to. For more information, see Amazon S3 Analytics – Storage
+// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
+//
+// You must create a bucket policy on the destination bucket where the exported
+// file is written to grant permissions to Amazon S3 to write objects to the
+// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory
+// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
+//
+// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others. For more information about permissions,
+// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
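+//
+// As an editorial sketch (not part of the generated AWS documentation), a
+// minimal analytics configuration that exports daily reports to a destination
+// bucket; svc is an *s3.S3 client and all names and ARNs are placeholders:
+//
+//    _, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("examplebucket"),
+//        Id:     aws.String("report1"),
+//        AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//            Id: aws.String("report1"),
+//            StorageClassAnalysis: &s3.StorageClassAnalysis{
+//                DataExport: &s3.StorageClassAnalysisDataExport{
+//                    OutputSchemaVersion: aws.String("V_1"),
+//                    Destination: &s3.AnalyticsExportDestination{
+//                        S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//                            Bucket: aws.String("arn:aws:s3:::destination-bucket"),
+//                            Format: aws.String("CSV"),
+//                        },
+//                    },
+//                },
+//            },
+//        },
+//    })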
+//
+// Special Errors
+//
+// * HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid
+// argument.
+//
+// * HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause:
+// You are attempting to create a new configuration but have already reached
+// the 1,000-configuration limit.
+//
+// * HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not
+// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration
+// bucket permission to set the configuration on the bucket.
+//
+// Related Resources
+//
+// * GetBucketAnalyticsConfiguration
+//
+// * DeleteBucketAnalyticsConfiguration
+//
+// * ListBucketAnalyticsConfigurations
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -5059,7 +6702,49 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
 // PutBucketCors API operation for Amazon Simple Storage Service.
 //
-// Sets the CORS configuration for a bucket.
+// Sets the cors configuration for your bucket. If the configuration exists,
+// Amazon S3 replaces it.
+//
+// To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// action. By default, the bucket owner has this permission and can grant it
+// to others.
+//
+// You set this configuration on a bucket so that the bucket can service cross-origin
+// requests. For example, you might want to enable a request whose origin is
+// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com
+// by using the browser's XMLHttpRequest capability.
+//
+// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors
+// subresource to the bucket. The cors subresource is an XML document in which
+// you configure rules that identify origins and the HTTP methods that can be
+// executed on your bucket. The document is limited to 64 KB in size.
+//
+// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request)
+// against a bucket, it evaluates the cors configuration on the bucket and uses
+// the first CORSRule rule that matches the incoming browser request to enable
+// a cross-origin request. For a rule to match, the following conditions must
+// be met:
+//
+// * The request's Origin header must match AllowedOrigin elements.
+//
+// * The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
+// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod
+// elements.
+//
+// * Every header specified in the Access-Control-Request-Headers request
+// header of a pre-flight request must match an AllowedHeader element.
+//
+// For more information about CORS, go to Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// Simple Storage Service Developer Guide.
+//
+// Related Resources
+//
+// * GetBucketCors
+//
+// * DeleteBucketCors
+//
+// * RESTOPTIONSobject
 //
 // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5134,8 +6819,28 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// Creates a new server-side encryption configuration (or replaces an existing -// one, if present). +// This implementation of the PUT operation uses the encryption subresource +// to set the default encryption state of an existing bucket. +// +// This implementation of the PUT operation sets default encryption for a bucket +// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS +// customer master keys (CMKs) (SSE-KMS). +// +// This operation requires AWS Signature Version 4. For more information, see +// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related Resources +// +// * GetBucketEncryption +// +// * DeleteBucketEncryption // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5210,8 +6915,54 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// Adds an inventory configuration (identified by the inventory ID) from the -// bucket. +// This implementation of the PUT operation adds an inventory configuration +// (identified by the inventory ID) to the bucket. You can have up to 1,000 +// inventory configurations per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same AWS Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev//storage-inventory.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis. 
(https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. +// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutInventoryConfiguration +// bucket permission to set the configuration on the bucket +// +// Related Resources +// +// * GetBucketInventoryConfiguration +// +// * DeleteBucketInventoryConfiguration +// +// * ListBucketInventoryConfigurations // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5291,7 +7042,55 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// No longer used, see the PutBucketLifecycleConfiguration operation. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration. +// This version has been deprecated. Existing lifecycle configurations will +// work. For new lifecycle configurations, use the updated API. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev//object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// By default, all Amazon S3 resources, including buckets, objects, and related +// subresources (for example, lifecycle configuration and website configuration) +// are private. Only the resource owner, the AWS account that created the resource, +// can access it. The resource owner can optionally grant access permissions +// to others by writing an access policy. For this operation, users must get +// the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit denial also supersedes +// any other permissions. If you want to prevent users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. 
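As a sketch of what a lifecycle configuration looks like through this SDK, the snippet below installs one rule with a STANDARD_IA transition and an expiration, using the newer PutBucketLifecycleConfiguration API rather than this deprecated operation; the bucket name and prefix are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("archive-then-expire-logs"),
				Status: aws.String("Enabled"),
				// Filter selects the subset of objects the rule applies to.
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				// Move objects to STANDARD_IA after 30 days...
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String(s3.TransitionStorageClassStandardIa),
				}},
				// ...and delete them after a year.
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```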
+// +// For more examples of transitioning objects to storage classes such as STANDARD_IA +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). +// +// Related Resources +// +// * GetBucketLifecycle (Deprecated) +// +// * GetBucketLifecycleConfiguration +// +// By default, a resource owner (in this case, a bucket owner, which is +// the AWS account that created the bucket) can perform any of the operations. +// A resource owner can also grant others permission to perform the operation. +// For more information, see the following topics in the Amazon Simple Storage +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5370,8 +7169,69 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. // -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. For information about lifecycle configuration, see +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. +// Accordingly, this section describes the latest API. The previous version +// of the API supported filtering based only on an object key name prefix, which +// is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle. +// +// Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. Each rule +// consists of the following: +// +// * Filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination +// of both. +// +// * Status whether the rule is in effect. +// +// * One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration).
Only the resource owner (that is, the AWS account that created +// it) can access the resource. The resource owner can optionally grant access +// permissions to others by writing an access policy. For this operation, a +// user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. Explicit deny also supersedes any +// other permissions. If you want to block users or accounts from removing or +// deleting objects from your bucket, you must deny them permissions for the +// following actions: +// +// * s3:DeleteObject +// +// * s3:DeleteObjectVersion +// +// * s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// The following are related to PutBucketLifecycleConfiguration: +// +// * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// * GetBucketLifecycleConfiguration +// +// * DeleteBucketLifecycle // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5447,9 +7307,52 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // PutBucketLogging API operation for Amazon Simple Storage Service. // // Set the logging parameters for a bucket and specify permissions for who -// can view and modify the logging parameters. To set the logging status of +// can view and modify the logging parameters. All logs are saved to buckets +// in the same AWS Region as the source bucket. To set the logging status of // a bucket, you must be the bucket owner. // +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. The Permissions +// request element specifies the kind of access the grantee has to the logs. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By the person's ID. You specify the canonical user ID in the Grantee +// element's ID child; the DisplayName child is optional and ignored in the +// request. +// +// * By email address. You specify the grantee's email address in the EmailAddress +// element. The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By URI. You specify the group's URI in the URI element, for example +// http://acs.amazonaws.com/groups/global/AuthenticatedUsers. +// +// To enable logging, you use LoggingEnabled and its children request elements. +// To disable logging, you use an empty BucketLoggingStatus request element: +// +// <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" /> +// +// For more information about server access logging, see Server Access Logging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). +// +// For more information about creating a bucket, see CreateBucket. For more +// information about returning the logging status of a bucket, see GetBucketLogging. +// +// The following operations are related to PutBucketLogging: +// +// * PutObject +// +// * DeleteBucket +// +// * CreateBucket +// +// * GetBucketLogging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error.
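Putting the logging elements above together, here is a minimal sketch that enables access logging to a separate log bucket; both bucket names and the prefix are placeholders. Sending an empty BucketLoggingStatus instead would disable logging again.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("my-bucket"), // placeholder source bucket
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			// LoggingEnabled corresponds to the LoggingEnabled request element;
			// omit it (empty BucketLoggingStatus) to turn logging off.
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("my-log-bucket"), // placeholder destination
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```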
@@ -5524,7 +7427,33 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. // // Sets a metrics configuration (specified by the metrics configuration ID) -// for the bucket. +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to PutBucketMetricsConfiguration: +// +// * DeleteBucketMetricsConfiguration +// +// * GetBucketMetricsConfiguration +// +// * ListBucketMetricsConfigurations +// +// PutBucketMetricsConfiguration has the following special error: +// +// * Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5683,7 +7612,55 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. // -// Enables notifications of specified events for a bucket. +// Enables notifications of specified events for a bucket. For more information +// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// Using this API, you can replace an existing notification configuration. The +// configuration is an XML file that defines the event types that you want Amazon +// S3 to publish and the destination where you want Amazon S3 to publish an +// event notification when it detects an event of the specified type. +// +// By default, your bucket has no event notifications configured. That is, the +// notification configuration will be an empty NotificationConfiguration: +// +// <NotificationConfiguration></NotificationConfiguration> +// +// This operation replaces the existing notification configuration with the +// configuration you include in the request body. +// +// After Amazon S3 receives this request, it first verifies that any Amazon +// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon +// SQS) destination exists, and that the bucket owner has permission to publish +// to it by sending a test notification.
In the case of AWS Lambda destinations, +// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission +// to invoke the function from the Amazon S3 bucket. For more information, see +// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// +// You can disable notifications by adding the empty NotificationConfiguration +// element. +// +// By default, only the bucket owner can configure notifications on a bucket. +// However, bucket owners can use a bucket policy to grant permission to other +// users to set this configuration with s3:PutBucketNotification permission. +// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT operation +// will fail, and Amazon S3 will not add the configuration to your bucket. +// +// Responses +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following operation is related to PutBucketNotificationConfiguration: +// +// * GetBucketNotificationConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5758,7 +7735,28 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following operations are related to PutBucketPolicy: +// +// * CreateBucket +// +// * DeleteBucket // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5834,9 +7832,56 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // PutBucketReplication API operation for Amazon Simple Storage Service. // // Creates a replication configuration or replaces an existing one. 
For more -// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // +// To perform this operation, the user or role performing the operation must +// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket where you want +// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to +// replicate objects on your behalf, and other relevant information. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. All rules must specify +// the same destination bucket. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). +// +// By default, a resource owner, in this case the AWS account that created the +// bucket, can perform this operation. The resource owner can also grant others +// permissions to perform the operation. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with CMKs stored in AWS KMS. To replicate AWS KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see ReplicationErrorCodeList +// +// The following operations are related to PutBucketReplication: +// +// * GetBucketReplication +// +// * DeleteBucketReplication +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5913,8 +7958,14 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // Sets the request payment configuration for a bucket. By default, the bucket // owner pays for downloads from the bucket. This configuration parameter enables // the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. 
Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// * CreateBucket +// +// * GetBucketRequestPayment // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5991,6 +8042,47 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // Sets the tags for a bucket. // +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. +// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Please try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// The following operations are related to PutBucketTagging: +// +// * GetBucketTagging +// +// * DeleteBucketTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
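As a minimal sketch, the call below applies a two-tag set with PutBucketTagging; note that the operation replaces any existing tag set rather than merging into it. The bucket name, keys, and values are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// The TagSet sent here becomes the bucket's entire tag set.
	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("CostCenter"), Value: aws.String("analytics")},
				{Key: aws.String("Environment"), Value: aws.String("production")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```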
@@ -6067,6 +8159,38 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // Sets the versioning state of an existing bucket. To set the versioning state, // you must be the bucket owner. // +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning request does not return a versioning state value. +// +// If the bucket owner enables MFA Delete in the bucket versioning configuration, +// the bucket owner must include the x-amz-mfa request header and the Status +// and the MfaDelete request elements in a request to set the versioning state +// of the bucket. +// +// If you have an object expiration lifecycle policy in your non-versioned bucket +// and you want to maintain the same permanent delete behavior when you enable +// versioning, you must add a noncurrent expiration policy. The noncurrent expiration +// lifecycle policy will manage the deletes of the noncurrent object versions +// in the version-enabled bucket. (A version-enabled bucket maintains one current +// and zero or more noncurrent object versions.) For more information, see Lifecycle +// and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// Related Resources +// +// * CreateBucket +// +// * DeleteBucket +// +// * GetBucketVersioning +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6140,7 +8264,67 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // PutBucketWebsite API operation for Amazon Simple Storage Service. // -// Set the website configuration for a bucket. +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// only the bucket owner can configure the website attached to a bucket; however, +// bucket owners can allow other users to set the website configuration by writing +// a bucket policy that grants them the S3:PutBucketWebsite permission. +// +// To redirect all website requests sent to the bucket's website endpoint, you +// add a website configuration with the following elements. Because all requests +// are sent to another website, you don't need to provide index document name +// for the bucket. +// +// * WebsiteConfiguration +// +// * RedirectAllRequestsTo +// +// * HostName +// +// * Protocol +// +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. 
In this case, the website configuration +// must provide an index document for the bucket, because some requests might +// not be redirected. +// +// * WebsiteConfiguration +// +// * IndexDocument +// +// * Suffix +// +// * ErrorDocument +// +// * Key +// +// * RoutingRules +// +// * RoutingRule +// +// * Condition +// +// * HttpErrorCodeReturnedEquals +// +// * KeyPrefixEquals +// +// * Redirect +// +// * Protocol +// +// * HostName +// +// * ReplaceKeyPrefixWith +// +// * ReplaceKeyWith +// +// * HttpRedirectCode // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6214,7 +8398,179 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // PutObject API operation for Amazon Simple Storage Service. // -// Adds an object to a bucket. +// Adds an object to a bucket. You must have WRITE permissions on a bucket to +// add an object to it. +// +// Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. Amazon S3 does not provide object locking; if you need this, make +// sure to build it into your application layer or use versioning instead. +// +// To ensure that data is not corrupted traversing the network, use the Content-MD5 +// header. When you use this header, Amazon S3 checks the object against the +// provided MD5 value and, if they do not match, returns an error. Additionally, +// you can calculate the MD5 while putting an object to Amazon S3 and compare +// the returned ETag to the calculated MD5 value. +// +// To configure your application to send the request headers before sending +// the request body, use the 100-continue HTTP status code. For PUT operations, +// this helps you avoid sending the message body if the message is rejected +// based on the headers (for example, because authentication fails or a redirect +// occurs). For more information on the 100-continue HTTP status code, see Section +// 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt (http://www.ietf.org/rfc/rfc2616.txt). +// +// You can optionally request server-side encryption. With server-side encryption, +// Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts the data when you access it. You have the option to provide +// your own encryption key or use AWS managed encryption keys. For more information, +// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// Access Permissions +// +// You can optionally specify the accounts or groups that should be granted +// specific permissions on the new object. There are two ways to grant the permissions +// using the request headers: +// +// * Specify a canned ACL with the x-amz-acl request header. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters +// map to the set of permissions that Amazon S3 supports in an ACL. 
For more +// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want +// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id +// of the symmetric customer managed CMK. Amazon S3 only supports symmetric +// CMKs and not asymmetric CMKs. For more information, see Using Symmetric +// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the AWS Key Management Service Developer Guide. All GET and PUT requests +// for an object protected by AWS KMS fail if you don't make them with SSL +// or by using SigV4. For more information about server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key +// x-amz-server-side​-encryption​-customer-key-MD5 For more information +// about server-side encryption with CMKs stored in KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Access-Control-List (ACL)-Specific Request Headers +// +// You also can use the following access control–related headers with this +// operation. By default, all objects are private. Only the owner has full access +// control. When adding a new object, you can grant permissions to individual +// AWS accounts or to predefined groups defined by Amazon S3. These permissions +// are then added to the Access Control List (ACL) on the object. For more information, +// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// With this operation, you can grant access permissions using one of the following +// two methods: +// +// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined +// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees +// and permissions. 
For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly — To explicitly grant access +// permissions to specific AWS accounts or groups, use the following headers. +// Each header maps to specific permissions that Amazon S3 supports in an +// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// In the header, you specify a list of grantees who get the specific permission. +// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write +// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You +// specify each grantee as a type=value pair, where the type is one of the +// following: emailAddress – if the value specified is the email address +// of an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) EU (Ireland) South America (São Paulo) For a list of all the +// Amazon S3 supported Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference id – if the value specified is the canonical +// user ID of an AWS account uri – if you are granting permissions to a +// predefined group For example, the following x-amz-grant-read header grants +// the AWS accounts identified by email addresses permissions to read object +// data and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" +// +// Server-Side- Encryption-Specific Request Headers +// +// You can optionally tell Amazon S3 to encrypt data at rest using server-side +// encryption. Server-side encryption is for data encryption at rest. Amazon +// S3 encrypts your data as it writes it to disks in its data centers and decrypts +// it when you access it. The option you use depends on whether you want to +// use AWS-managed encryption keys or provide your own encryption key. +// +// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) +// stored in AWS Key Management Service (AWS KMS) – If you want AWS to +// manage the keys used to encrypt data, specify the following headers in +// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, +// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon +// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want +// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id +// of the symmetric customer managed CMK. Amazon S3 only supports symmetric +// CMKs and not asymmetric CMKs. For more information, see Using Symmetric +// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) +// in the AWS Key Management Service Developer Guide. All GET and PUT requests +// for an object protected by AWS KMS fail if you don't make them with SSL +// or by using SigV4. 
For more information about server-side encryption with +// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side +// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// * Use customer-provided encryption keys – If you want to manage your +// own encryption keys, provide all the following headers in the request. +// If you use this feature, the ETag value that Amazon S3 returns in the +// response is not the MD5 of the object. x-amz-server-side​-encryption​-customer-algorithm +// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 +// For more information about server-side encryption with CMKs stored in +// AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with +// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// +// Storage Class Options +// +// By default, Amazon S3 uses the Standard storage class to store newly created +// objects. The Standard storage class provides high durability and high availability. +// You can specify other storage classes depending on the performance needs. +// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Versioning +// +// If you enable versioning for a bucket, Amazon S3 automatically generates +// a unique version ID for the object being stored. Amazon S3 returns this ID +// in the response using the x-amz-version-id response header. If versioning +// is suspended, Amazon S3 always uses null as the version ID for the object +// stored. For more information about returning the versioning state of a bucket, +// see GetBucketVersioning. If you enable versioning for a bucket, when Amazon +// S3 receives multiple write requests for the same object simultaneously, it +// stores all of the objects. +// +// Related Resources +// +// * CopyObject +// +// * DeleteObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6288,8 +8644,73 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // PutObjectAcl API operation for Amazon Simple Storage Service. // -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket +// Uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket. You must have WRITE_ACP permission +// to set the ACL of an object. +// +// Depending on your application needs, you can choose to set the ACL on an +// object using either the request body or the headers. For example, if you +// have an existing application that updates a bucket ACL using the request +// body, you can continue to use that approach. +// +// Access Permissions +// +// You can set access permissions using one of the following methods: +// +// * Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. 
For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (AWS +// accounts or Amazon S3 groups) who will receive the permission. If you +// use these ACL-specific headers, you cannot use the x-amz-acl header to set +// a canned ACL. These parameters map to the set of permissions that Amazon +// S3 supports in an ACL. For more information, see Access Control List (ACL) +// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// You specify each grantee as a type=value pair, where the type is one of +// the following: emailAddress – if the value specified is the email address +// of an AWS account id – if the value specified is the canonical user +// ID of an AWS account uri – if you are granting permissions to a predefined +// group For example, the following x-amz-grant-read header grants list objects +// permission to the two AWS accounts identified by their email addresses. +// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// +// You can use either a canned ACL or specify access permissions explicitly. +// You cannot do both. +// +// Grantee Values +// +// You can specify the person (grantee) to whom you're assigning access rights +// (using request elements) in the following ways: +// +// * By email address. You specify the grantee's email address in the EmailAddress +// element. The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. +// +// * By the person's ID. You specify the canonical user ID in the Grantee +// element's ID child; the DisplayName child is optional and ignored in the +// request. +// +// * By URI. You specify the group's URI in the URI element, for example +// http://acs.amazonaws.com/groups/global/AuthenticatedUsers. +// +// Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// Related Resources +// +// * CopyObject +// +// * GetObject // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6370,6 +8791,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // // Applies a Legal Hold configuration to the specified object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6442,10 +8867,17 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // PutObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Places an object lock configuration on the specified bucket. The rule specified -// in the object lock configuration will be applied by default to every new +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new // object placed in the specified bucket. // +// DefaultRetention requires either Days or Years. You can't specify both at +// the same time.
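As a sketch of the Days-or-Years rule just mentioned, the following applies a 30-day GOVERNANCE-mode default retention; the bucket must already have been created with Object Lock enabled, and the bucket name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("my-locked-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // Days or Years, never both
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```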
+// +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6520,6 +8952,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // // Places an Object Retention configuration on an object. // +// Related Resources +// +// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6594,6 +9030,43 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // Sets the supplied tag-set to an object that already exists in a bucket // +// A tag is a key-value pair. You can associate tags with an object by sending +// a PUT request against the tagging subresource that is associated with the +// object. You can retrieve tags by sending a GET request. For more information, +// see GetObjectTagging. +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// Special Errors +// +// * Code: InvalidTagError Cause: The tag provided was not a valid tag. This +// error can occur if the tag did not pass input validation. For more information, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// * Code: MalformedXMLError Cause: The XML provided does not match the schema. +// +// * Code: OperationAbortedError Cause: A conflicting conditional operation +// is currently in progress against this resource. Please try again. +// +// * Code: InternalError Cause: The service was unable to apply the provided +// tag to the object. +// +// Related Resources +// +// * GetObjectTagging +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6668,7 +9141,29 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req // PutPublicAccessBlock API operation for Amazon Simple Storage Service. // // Creates or modifies the PublicAccessBlock configuration for an Amazon S3 -// bucket. +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). 
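A minimal sketch that turns on all four public-access guards for a bucket follows; the bucket name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Enable every setting; Amazon S3 applies the most restrictive
	// combination of bucket-level and account-level configuration.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("my-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```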
+// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// Related Resources +// +// * GetPublicAccessBlock +// +// * DeletePublicAccessBlock +// +// * GetBucketPolicyStatus +// +// * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6744,6 +9239,190 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restores an archived copy of an object back into Amazon S3 // +// This operation performs the following types of requests: +// +// * select - Perform a select query on an archived object +// +// * restore an archive - Restore an archived object +// +// To use this operation, you must have permissions to perform the s3:RestoreObject +// and s3:GetObject actions. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Querying Archives with Select Requests +// +// You use a select type of request to perform SQL queries on archived objects. +// The archived objects that are being queried by the select request must be +// formatted as uncompressed comma-separated values (CSV) files. You can run +// queries and custom analytics on your archived data without having to restore +// your data to a hotter Amazon S3 tier. For an overview about select requests, +// see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, do the following: +// +// * Define an output location for the select query's output. This must be +// an Amazon S3 bucket in the same AWS Region as the bucket that contains +// the archive object that is being queried. The AWS account that initiates +// the job must have permissions to write to the S3 bucket. You can specify +// the storage class and encryption for the output objects stored in the +// bucket. For more information about output, see Querying Archived Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) +// in the Amazon Simple Storage Service Developer Guide. 
For more information +// about the S3 structure in the request body, see the following: PutObject +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon Simple Storage Service Developer Guide Protecting Data Using +// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide +// +// * Define the SQL expression for the SELECT type of restoration for your +// query in the request body's SelectParameters structure. You can use expressions +// like the following examples. The following expression returns all records +// from the specified object. SELECT * FROM Object Assuming that you are +// not using any headers for data stored in the object, you can specify columns +// with positional headers. SELECT s._1, s._2 FROM Object s WHERE s._3 > +// 100 If you have headers and you set the fileHeaderInfo in the CSV structure +// in the request body to USE, you can specify headers in the query. (If +// you set the fileHeaderInfo field to IGNORE, the first row is skipped for +// the query.) You cannot mix ordinal positions with header column names. +// SELECT s.Id, s.FirstName, s.SSN FROM S3Object s +// +// For more information about using SQL with Glacier Select restore, see SQL +// Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// When making a select request, you can also do the following: +// +// * To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// +// * Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded +// query results. +// +// The following are additional important facts about the select feature: +// +// * The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted-manually or through a lifecycle +// policy. +// +// * You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// +// * Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409. +// +// Restoring Archives +// +// Objects in the GLACIER and DEEP_ARCHIVE storage classes are archived. To +// access an archived object, you must first initiate a restore request. This +// restores a temporary copy of the archived object. In a restore request, you +// specify the number of days that you want the restored copy to exist. After +// the specified period, Amazon S3 deletes the temporary copy but the object +// remains archived in the GLACIER or DEEP_ARCHIVE storage class that object +// was restored from. +// +// To restore a specific object version, you can provide a version ID. If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// The time it takes restore jobs to finish depends on which storage class the +// object is being restored from and which data access tier you specify. 
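As a sketch, the request below initiates a ten-day restore of an archived object at the Standard tier (the Tier element is described next); the bucket and key are placeholders.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("my-archive-bucket"),        // placeholder
		Key:    aws.String("backups/2019/archive.tar"), // placeholder
		RestoreRequest: &s3.RestoreRequest{
			// How long Amazon S3 keeps the temporary restored copy.
			Days: aws.Int64(10),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```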
+// +// When restoring an archived object (or using a select request), you can specify +// one of the following data access tier options in the Tier element of the +// request body: +// +// * Expedited - Expedited retrievals allow you to quickly access your data +// stored in the GLACIER storage class when occasional urgent requests for +// a subset of archives are required. For all but the largest archived objects +// (250 MB+), data accessed using Expedited retrievals is typically made +// available within 1–5 minutes. Provisioned capacity ensures that retrieval +// capacity for Expedited retrievals is available when you need it. Expedited +// retrievals and provisioned capacity are not available for the DEEP_ARCHIVE +// storage class. +// +// * Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for the GLACIER +// and DEEP_ARCHIVE retrieval requests that do not specify the retrieval +// option. Standard retrievals typically complete within 3-5 hours from the +// GLACIER storage class and typically complete within 12 hours from the +// DEEP_ARCHIVE storage class. +// +// * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval +// option, enabling you to retrieve large amounts, even petabytes, of data +// inexpensively in a day. Bulk retrievals typically complete within 5-12 +// hours from the GLACIER storage class and typically complete within 48 +// hours from the DEEP_ARCHIVE storage class. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. You upgrade the speed of an in-progress +// restoration by issuing another restore request to the same object, setting +// a new Tier request element. When issuing a request to upgrade the restore +// tier, you must choose a tier that is faster than the tier that the in-progress +// restore is using. You must not change any other parameters, such as the Days +// request element. For more information, see Upgrading the Speed of an In-Progress +// Restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier) +// in the Amazon Simple Storage Service Developer Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. Amazon S3 updates the restoration +// period relative to the current time and charges only for the request; there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object.
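To poll the status just described, a HEAD request through this SDK surfaces the x-amz-restore header as the Restore field of the output; this sketch continues the placeholder names from the previous one.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	head, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("my-archive-bucket"),        // placeholder
		Key:    aws.String("backups/2019/archive.tar"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Restore mirrors the x-amz-restore header, for example
	// ongoing-request="true" while the restore is still in flight.
	if head.Restore != nil {
		fmt.Println("restore status:", *head.Restore)
	} else {
		fmt.Println("no restore in progress or recorded")
	}
}
```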
+// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Responses +// +// A successful operation returns either the 200 OK or 202 Accepted status code. +// +// * If the object copy is not previously restored, then Amazon S3 returns +// 202 Accepted in the response. +// +// * If the object copy is previously restored, Amazon S3 returns 200 OK +// in the response. +// +// Special Errors +// +// * Code: RestoreAlreadyInProgress Cause: Object restore is already in progress. +// (This error does not apply to SELECT type requests.) HTTP Status Code: +// 409 Conflict SOAP Fault Code Prefix: Client +// +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: Glacier expedited +// retrievals are currently not available. Try again later. (Returned if +// there is insufficient capacity to process the Expedited request. This +// error applies only to Expedited retrievals and not to Standard or Bulk +// retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A +// +// Related Resources +// +// * PutBucketLifecycleConfiguration +// +// * GetBucketNotificationConfiguration +// +// * SQL Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6753,7 +9432,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier +// This operation is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -6816,20 +9495,104 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) + + es := newSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) - req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) return } // SelectObjectContent API operation for Amazon Simple Storage Service. // // This operation filters the contents of an Amazon S3 object based on a simple -// Structured Query Language (SQL) statement. In the request, along with the -// SQL expression, you must also specify a data serialization format (JSON or -// CSV) of the object.
Amazon S3 uses this to parse object data into records, -// and returns only records that match the specified SQL expression. You must -// also specify the data serialization format for the response. +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For more information about using SQL with Amazon S3 Select, see SQL Reference +// for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Permissions +// +// You must have s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// * CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// * UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// * GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// * Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in GetObject. For more +// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon Simple Storage Service Developer Guide. For objects that +// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer +// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Working with the Response Body +// +// Because the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see RESTSelectObjectAppendix.
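Here is a minimal sketch of draining that chunked response through the event stream plumbing this diff introduces; the bucket, key, and query are invented, and error handling is kept short:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),  // hypothetical
		Key:            aws.String("data/people.csv"), // hypothetical
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		// CSV fields are strings, so cast before the numeric comparison.
		Expression: aws.String(`SELECT * FROM S3Object s WHERE CAST(s._3 AS INT) > 100`),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoNone)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// The response arrives as a series of typed events on a channel.
	for ev := range out.EventStream.Events() {
		switch e := ev.(type) {
		case *s3.RecordsEvent:
			os.Stdout.Write(e.Payload) // raw CSV result rows
		case *s3.StatsEvent:
			fmt.Fprintf(os.Stderr, "scanned %d bytes\n",
				aws.Int64Value(e.Details.BytesScanned))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}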
+// +// GetObject Support +// +// The SelectObjectContent operation does not support the following GetObject +// functionality. For more information, see GetObject. +// +// * Range: While you can specify a scan range for an Amazon S3 Select request +// (see SelectObjectContentRequest$ScanRange in the request parameters), +// you cannot specify the range of bytes of an object to return. +// +// * GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes: You cannot +// specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. +// For more information about storage classes, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) +// in the Amazon Simple Storage Service Developer Guide. +// +// Special Errors +// +// For a list of special errors for this operation and for general information +// about Amazon S3 errors and a list of error codes, see ErrorResponses. +// +// Related Resources +// +// * GetObject +// +// * GetBucketLifecycleConfiguration +// +// * PutBucketLifecycleConfiguration // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6859,6 +9622,147 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject return out, req.Send() } +// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent operation. +type SelectObjectContentEventStream struct { + + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made. + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + outputReader io.ReadCloser + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer + + done chan struct{} + closeOnce sync.Once + err *eventstreamapi.OnceError +} + +func newSelectObjectContentEventStream() *SelectObjectContentEventStream { + return &SelectObjectContentEventStream{ + done: make(chan struct{}), + err: eventstreamapi.NewOnceError(), + } +} + +func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { + es.StreamCloser = r.HTTPResponse.Body +} + +func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) { + if es.done == nil { + return + } + go es.waitStreamPartClose() + +} + +func (es *SelectObjectContentEventStream) waitStreamPartClose() { + var outputErrCh <-chan struct{} + if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok { + outputErrCh = v.ErrorSet() + } + var outputClosedCh <-chan struct{} + if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok { + outputClosedCh = v.Closed() + } + + select { + case <-es.done: + case <-outputErrCh: + es.err.SetError(es.Reader.Err()) + es.Close() + case <-outputClosedCh: + if err := es.Reader.Err(); err != nil { + es.err.SetError(es.Reader.Err()) + } + es.Close() + } } + +// Events returns a channel to read events from.
+// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) { + var opts []func(*eventstream.Decoder) + if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) { + opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger)) + } + + unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{ + metadata: protocol.ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + }, + }.UnmarshalerForEventName + + decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...) + eventReader := eventstreamapi.NewEventReader(decoder, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: r.Handlers.UnmarshalStream, + }, + unmarshalerForEvent, + ) + + es.outputReader = r.HTTPResponse.Body + es.Reader = newReadSelectObjectContentEventStream(eventReader) +} + +// Close closes the stream. This will also cause the stream to be closed. +// Close must be called when done using the stream API. Not calling Close +// may result in resource leaks. +// +// You can use the closing of the Reader's Events channel to terminate your +// application's read from the API's stream. +// +func (es *SelectObjectContentEventStream) Close() (err error) { + es.closeOnce.Do(es.safeClose) + return es.Err() +} + +func (es *SelectObjectContentEventStream) safeClose() { + if es.done != nil { + close(es.done) + } + + es.Reader.Close() + if es.outputReader != nil { + es.outputReader.Close() + } + + es.StreamCloser.Close() +} + +// Err returns any error that occurred while reading or writing EventStream +// Events from the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.err.Err(); err != nil { + return err + } + if err := es.Reader.Err(); err != nil { + return err + } + + return nil +} + const opUploadPart = "UploadPart" // UploadPartRequest generates a "aws/request.Request" representing the @@ -6905,12 +9809,87 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Uploads a part in a multipart upload. // +// In this operation, you provide part data in your request. However, you have +// an option to specify your existing Amazon S3 object as a data source for +// the part you are uploading. To upload a part from an existing object, you +// use the UploadPartCopy operation. +// +// You must initiate a multipart upload (see CreateMultipartUpload) before you +// can upload any part. In response to your initiate request, Amazon S3 returns +// an upload ID, a unique identifier, that you must include in your upload part +// request. +// +// Part numbers can be any number from 1 to 10,000, inclusive. A part number +// uniquely identifies a part and also defines its position within the object +// being created. If you upload a new part using the same part number that was +// used with a previous part, the previously uploaded part is overwritten. Each +// part must be at least 5 MB in size, except the last part. There is no size +// limit on the last part of your multipart upload. +// +// To ensure that data is not corrupted when traversing the network, specify +// the Content-MD5 header in the upload part request. Amazon S3 checks the part +// data against the provided MD5 value. 
If they do not match, Amazon S3 returns +// an error. +// // Note: After you initiate multipart upload and upload one or more parts, you // must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. // +// For more information on multipart uploads, go to Multipart Upload Overview +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the +// Amazon Simple Storage Service Developer Guide. +// +// For information on the permissions required to use the multipart upload API, +// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// You can optionally request server-side encryption where Amazon S3 encrypts +// your data as it writes it to disks in its data centers and decrypts it for +// you when you access it. You have the option of providing your own encryption +// key, or you can use the AWS managed encryption keys. If you choose to provide +// your own encryption key, the request headers you provide in the request must +// match the headers you used in the request to initiate the upload by using +// CreateMultipartUpload. For more information, go to Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side encryption is supported by the S3 Multipart Upload actions. Unless +// you are using a customer-provided encryption key, you don't need to specify +// the encryption parameters in each UploadPart request. Instead, you only need +// to specify the server-side encryption parameters in the initial Initiate +// Multipart request. For more information, see CreateMultipartUpload. +// +// If you requested server-side encryption using a customer-provided encryption +// key in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following headers. +// +// * x-amz-server-side-encryption-customer-algorithm +// +// * x-amz-server-side-encryption-customer-key +// +// * x-amz-server-side-encryption-customer-key-MD5 +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist. +// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault Code +// Prefix: Client +// +// Related Resources +// +// * CreateMultipartUpload +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6983,7 +9962,94 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // UploadPartCopy API operation for Amazon Simple Storage Service. // -// Uploads a part by copying data from an existing object as data source. +// Uploads a part by copying data from an existing object as a data source. You +// specify the data source by adding the request header x-amz-copy-source in +// your request and a byte range by adding the request header x-amz-copy-source-range +// in your request.
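For orientation, a compressed sketch of the whole flow these doc comments describe: initiate, upload one part from memory, copy a second part by byte range with UploadPartCopy, then complete. All names and sizes are invented; the 5 MB minimum applies to every part except the last.

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("big/object") // hypothetical

	// 1. Initiate and remember the upload ID.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: bucket, Key: key,
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload part 1 from the request body.
	part1, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket: bucket, Key: key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(make([]byte, 5*1024*1024)),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Copy part 2 from an existing object using a byte range.
	part2, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket: bucket, Key: key,
		UploadId:        mpu.UploadId,
		PartNumber:      aws.Int64(2),
		CopySource:      aws.String("source-bucket/source-object"), // hypothetical
		CopySourceRange: aws.String("bytes=0-5242879"),             // first 5 MB
	})
	if err != nil {
		log.Fatal(err)
	}

	// 4. Complete (or abort) so the parts stop accruing storage charges.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket: bucket, Key: key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{PartNumber: aws.Int64(1), ETag: part1.ETag},
				{PartNumber: aws.Int64(2), ETag: part2.CopyPartResult.ETag},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}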
+// +// The minimum allowable part size for a multipart upload is 5 MB. For more +// information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Instead of using an existing object as part data, you might use the UploadPart +// operation and provide data in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request, Amazon S3 returns a unique identifier, the upload +// ID, that you must include in your upload part request. +// +// For more information about using the UploadPartCopy operation, see the following: +// +// * For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about permissions required to use the multipart upload +// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about copying objects using a single atomic operation +// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// * For information about using server-side encryption with customer-provided +// encryption keys with the UploadPartCopy operation, see CopyObject and +// UploadPart. +// +// Note the following additional considerations about the request headers x-amz-copy-source-if-match, +// x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and +// x-amz-copy-source-if-modified-since: +// +// * Consideration 1 - If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request as follows: x-amz-copy-source-if-match +// condition evaluates to true, and; x-amz-copy-source-if-unmodified-since +// condition evaluates to false; Amazon S3 returns 200 OK and copies the +// data. +// +// * Consideration 2 - If both of the x-amz-copy-source-if-none-match and +// x-amz-copy-source-if-modified-since headers are present in the request +// as follows: x-amz-copy-source-if-none-match condition evaluates to false, +// and; x-amz-copy-source-if-modified-since condition evaluates to true; +// Amazon S3 returns a 412 Precondition Failed response code. +// +// Versioning +// +// If your bucket has versioning enabled, you could have multiple versions of +// the same object. By default, x-amz-copy-source identifies the current version +// of the object to copy. If the current version is a delete marker and you +// don't specify a versionId in the x-amz-copy-source, Amazon S3 returns a 404 +// error, because the object does not exist. If you specify versionId in the +// x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns +// an HTTP 400 error, because you are not allowed to specify a delete marker +// as a version for the x-amz-copy-source. +// +// You can optionally specify a specific version of the source object to copy +// by adding the versionId subresource as shown in the following example: +// +// x-amz-copy-source: /bucket/object?versionId=version id +// +// Special Errors +// +// * Code: NoSuchUpload Cause: The specified multipart upload does not exist.
+// The upload ID might be invalid, or the multipart upload might have been +// aborted or completed. HTTP Status Code: 404 Not Found +// +// * Code: InvalidRequest Cause: The specified copy source is not supported +// as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// Related Resources +// +// * CreateMultipartUpload +// +// * UploadPart +// +// * CompleteMultipartUpload +// +// * AbortMultipartUpload +// +// * ListParts +// +// * ListMultipartUploads // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7045,7 +10111,14 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI type AbortMultipartUploadInput struct { _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` - // Name of the bucket to which the multipart upload was initiated. + // The bucket name to which the upload was taking place. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -7055,10 +10128,11 @@ type AbortMultipartUploadInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID that identifies the multipart upload. @@ -7133,6 +10207,20 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -7335,9 +10423,6 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { // Specifies the configuration and any analyses for the analytics filter of // an Amazon S3 bucket. 
-// -// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) -// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -7456,6 +10541,9 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -7518,6 +10606,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } +// Contains information about where to publish the analytics results. type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` @@ -7596,6 +10685,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } +// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name +// is globally unique, and the namespace is shared by all AWS accounts. type Bucket struct { _ struct{} `type:"structure"` @@ -7679,6 +10770,7 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } +// Container for logging status information. type BucketLoggingStatus struct { _ struct{} `type:"structure"` @@ -7727,7 +10819,8 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin type CORSConfiguration struct { _ struct{} `type:"structure"` - // A set of allowed origins and methods. + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` @@ -7859,7 +10952,8 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Describes how a CSV-formatted input object is formatted. +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. type CSVInput struct { _ struct{} `type:"structure"` @@ -7868,24 +10962,45 @@ type CSVInput struct { // to TRUE may lower performance. AllowQuotedRecordDelimiter *bool `type:"boolean"` - // The single character used to indicate a row should be ignored when present - // at the start of a row. + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. Comments *string `type:"string"` - // The value used to separate individual fields in a record. + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // Describes the first line of input. Valid values: None, Ignore, Use. + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). 
FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` - // Value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV QuoteCharacter *string `type:"string"` - // The single character used for escaping the quote character inside an already - // escaped value. + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". QuoteEscapeCharacter *string `type:"string"` - // The value used to separate individual records. + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -7941,24 +11056,33 @@ func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { return s } -// Describes how CSV-formatted results are formatted. +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. type CSVOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual fields in a record. + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. FieldDelimiter *string `type:"string"` - // The value used for escaping where the field delimiter is part of the value. + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". QuoteCharacter *string `type:"string"` - // Th single character used for escaping the quote character inside an already + // The single character used for escaping the quote character inside an already // escaped value. QuoteEscapeCharacter *string `type:"string"` - // Indicates whether or not all output fields should be quoted. + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. QuoteFields *string `type:"string" enum:"QuoteFields"` - // The value used to separate individual records. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string `type:"string"` } @@ -8002,9 +11126,12 @@ func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { return s } +// Container for specifying the AWS Lambda notification configuration. type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. CloudFunction *string `type:"string"` // The bucket event for which to send notifications. @@ -8012,12 +11139,14 @@ type CloudFunctionConfiguration struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // Bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. 
Id *string `type:"string"` + // The role supporting the invocation of the Lambda function InvocationRole *string `type:"string"` } @@ -8061,9 +11190,15 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. type CommonPrefix struct { _ struct{} `type:"structure"` + // Container for the specified common prefix. Prefix *string `type:"string"` } @@ -8086,20 +11221,28 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { type CompleteMultipartUploadInput struct { _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The container for the multipart upload request information. MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // ID for the initiated multipart upload. + // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -8176,35 +11319,61 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` + // The name of the bucket that contains the newly created object. Bucket *string `type:"string"` - // Entity tag of the object. + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. 
If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. ETag *string `type:"string"` // If the object expiration is configured, this will contain the expiration // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + // The object key of the newly created object. Key *string `min:"1" type:"string"` + // The URI that identifies the newly created object. Location *string `type:"string"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an Amazon S3-managed + // encryption key or an AWS KMS customer master key (CMK) in your initiate multipart + // upload request, the response includes this header. It confirms the encryption + // algorithm that Amazon S3 used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // Version of the object. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -8279,9 +11448,11 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar return s } +// The container for the completed multipart upload details. type CompletedMultipartUpload struct { _ struct{} `type:"structure"` + // Array of CompletedPart data types. Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` } @@ -8301,6 +11472,7 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip return s } +// Details of the parts that were uploaded. type CompletedPart struct { _ struct{} `type:"structure"` @@ -8334,7 +11506,10 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { return s } -// Specifies a condition that must be met for a redirect to apply. +// A container for describing a condition that must be met for the specified +// redirect to apply. For example, 1. If request is for pages in the /docs folder, +// redirect to the /documents folder. 2. If request results in HTTP error 4xx, +// redirect request to another host where you might process the error. 
type Condition struct { _ struct{} `type:"structure"` @@ -8403,12 +11578,19 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + type CopyObjectInput struct { _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the destination bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -8448,7 +11630,8 @@ type CopyObjectInput struct { // Copies the object if it hasn't been modified since the specified time. CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt @@ -8457,8 +11640,8 @@ type CopyObjectInput struct { CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. @@ -8476,6 +11659,8 @@ type CopyObjectInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // The key of the destination object. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -8489,31 +11674,33 @@ type CopyObjectInput struct { // Specifies whether you want to apply a Legal Hold to the copied object. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the copied object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when you want the copied object's object lock to expire. + // The date and time when you want the copied object's Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. 
Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the AWS KMS Encryption Context to use for object encryption. The @@ -8523,12 +11710,14 @@ type CopyObjectInput struct { // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // or using SigV4. For information about configuring using any of the officially + // supported AWS SDKs and AWS CLI, see Specifying the Signature Version in Request + // Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object.
Defaults to 'STANDARD'. @@ -8536,7 +11725,7 @@ type CopyObjectInput struct { // The tag-set for the object destination object this value must be used in // conjunction with the TaggingDirective. The tag-set must be encoded as URL - // Query parameters + // Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // Specifies whether the object tag-set are copied from the source object or @@ -8827,11 +12016,27 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` + // Version of the copied object in the destination bucket. CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` // If the object expiration is configured, the response includes this header. @@ -8847,7 +12052,7 @@ type CopyObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -8856,12 +12061,13 @@ type CopyObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version ID of the newly created copy. @@ -8938,11 +12144,16 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } +// Container for all response elements. type CopyObjectResult struct { _ struct{} `type:"structure"` + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. The source and destination ETag + // is identical for a successfully copied object. ETag *string `type:"string"` + // Returns the date that the object was last modified. 
LastModified *time.Time `type:"timestamp"` } @@ -8968,6 +12179,7 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } +// Container for all response elements. type CopyPartResult struct { _ struct{} `type:"structure"` @@ -9000,11 +12212,12 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } +// The configuration information for the bucket. type CreateBucketConfiguration struct { _ struct{} `type:"structure"` - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). + // Specifies the Region where the bucket will be created. If you don't specify + // a Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -9030,9 +12243,12 @@ type CreateBucketInput struct { // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + // The name of the bucket to create. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The configuration information for the bucket. CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the @@ -9051,8 +12267,7 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Specifies whether you want Amazon S3 object lock to be enabled for the new - // bucket. + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } @@ -9146,6 +12361,9 @@ func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketI type CreateBucketOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. Location *string `location:"header" locationName:"Location" type:"string"` } @@ -9171,6 +12389,8 @@ type CreateMultipartUploadInput struct { // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + // The name of the bucket to which to initiate the upload + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -9206,6 +12426,8 @@ type CreateMultipartUploadInput struct { // Allows grantee to write the ACL for the applicable object. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Object key for which the multipart upload is to be initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9215,31 +12437,33 @@ type CreateMultipartUploadInput struct { // Specifies whether you want to apply a Legal Hold to the uploaded object. 
ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // Specifies the object lock mode that you want to apply to the uploaded object. + // Specifies the Object Lock mode that you want to apply to the uploaded object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // Specifies the date and time when you want the object lock to expire. + // Specifies the date and time when you want the Object Lock to expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Specifies the AWS KMS Encryption Context to use for object encryption. The @@ -9247,20 +12471,22 @@ type CreateMultipartUploadInput struct { - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4.
Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + // Specifies the ID of the symmetric customer managed AWS KMS CMK to use for + // object encryption. All GET and PUT requests for an object protected by AWS + // KMS will fail if not made via SSL or using SigV4. For information about configuring + // using any of the officially supported AWS SDKs and AWS CLI, see Specifying + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object @@ -9477,17 +12703,47 @@ func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *Creat return s } +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines this action. AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. 
AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` // Object key for which the multipart upload was initiated. @@ -9503,7 +12759,7 @@ type CreateMultipartUploadOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -9512,12 +12768,13 @@ type CreateMultipartUploadOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // ID for the initiated multipart upload. @@ -9607,7 +12864,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// The container element for specifying the default object lock retention settings +// The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. type DefaultRetention struct { _ struct{} `type:"structure"` @@ -9615,7 +12872,7 @@ type DefaultRetention struct { // The number of days that you want to specify for the default retention period. Days *int64 `type:"integer"` - // The default object lock retention mode you want to apply to new objects placed + // The default Object Lock retention mode you want to apply to new objects placed // in the specified bucket. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` @@ -9651,9 +12908,12 @@ func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { return s } +// Container for the objects to delete.
type Delete struct { _ struct{} `type:"structure"` + // The objects to delete. + // // Objects is a required field Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` @@ -9769,6 +13029,20 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketA return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -9786,6 +13060,8 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { type DeleteBucketCorsInput struct { _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + // Specifies the bucket whose cors configuration is being deleted. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9829,6 +13105,20 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -9892,6 +13182,20 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -9909,6 +13213,8 @@ func (s DeleteBucketEncryptionOutput) GoString() string { type DeleteBucketInput struct { _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + // Specifies the bucket being deleted. 
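// Illustrative sketch of what the getEndpointARN/hasEndpointARN helpers added
// throughout this file enable: callers may pass an S3 access point ARN
// wherever a bucket name is expected, and the client re-routes the request to
// the access point endpoint. The ARN value is hypothetical; assumes `svc *s3.S3`
// and imports "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/aws/arn" and "github.com/aws/aws-sdk-go/service/s3".
func deleteViaAccessPoint(svc *s3.S3) error {
	apARN := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"
	// Mirrors hasEndpointARN: ARN handling only kicks in when the Bucket
	// member is structurally an ARN.
	if !arn.IsARN(apARN) {
		return fmt.Errorf("not an ARN: %s", apARN)
	}
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket: aws.String(apARN), // access point ARN in place of the bucket name
		Key:    aws.String("example-object"),
	})
	return err
}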
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -9952,6 +13258,20 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationInput struct { _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` @@ -10014,6 +13334,20 @@ func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketI return s } +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -10031,6 +13365,8 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { type DeleteBucketLifecycleInput struct { _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + // The bucket name of the lifecycle to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10074,6 +13410,20 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -10150,6 +13500,20 @@ func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMet return s } +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -10181,6 +13545,8 @@ func (s DeleteBucketOutput) GoString() string { type DeleteBucketPolicyInput struct { _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + // The bucket name. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10224,6 +13590,20 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -10243,9 +13623,6 @@ type DeleteBucketReplicationInput struct { // The bucket name. // - // It can take a while to propagate the deletion of a replication configuration - // to all Amazon S3 systems. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10289,6 +13666,20 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -10306,6 +13697,8 @@ func (s DeleteBucketReplicationOutput) GoString() string { type DeleteBucketTaggingInput struct { _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + // The bucket that has the tag set to be removed. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10349,6 +13742,20 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -10366,6 +13773,8 @@ func (s DeleteBucketTaggingOutput) GoString() string { type DeleteBucketWebsiteInput struct { _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + // The bucket name for which you want to remove the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -10409,6 +13818,20 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -10423,6 +13846,7 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } +// Information about the delete marker. type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -10436,6 +13860,7 @@ type DeleteMarkerEntry struct { // Date and time the object was last modified. 
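// Illustrative sketch: the DeleteMarkerEntry values described above surface in
// ListObjectVersions results for a versioned bucket. Assumes `svc *s3.S3`, a
// hypothetical bucket name, and imports "fmt", "github.com/aws/aws-sdk-go/aws"
// and "github.com/aws/aws-sdk-go/service/s3".
func listDeleteMarkers(svc *s3.S3, bucket string) error {
	return svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, dm := range page.DeleteMarkers {
			// dm.Owner is the account that created the delete marker.
			fmt.Printf("%s (version %s, latest=%v)\n", aws.StringValue(dm.Key),
				aws.StringValue(dm.VersionId), aws.BoolValue(dm.IsLatest))
		}
		return true // continue paging
	})
}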
LastModified *time.Time `type:"timestamp"` + // The account that created the delete marker. Owner *Owner `type:"structure"` // Version ID of an object. @@ -10482,11 +13907,21 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Specifies whether Amazon S3 should replicate delete makers. +// Specifies whether Amazon S3 replicates the delete markers. If you specify +// a Filter, you must specify this element. However, in the latest version of +// replication configuration (when Filter is specified), Amazon S3 doesn't replicate +// delete markers. Therefore, the DeleteMarkerReplication element can contain +// only Disabled. For an example configuration, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, Amazon +// S3 handled replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). type DeleteMarkerReplication struct { _ struct{} `type:"structure"` - // The status of the delete marker replication. + // Indicates whether to replicate delete markers. // // In the current implementation, Amazon S3 doesn't replicate the delete markers. // The status must be Disabled. @@ -10512,24 +13947,38 @@ func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { type DeleteObjectInput struct { _ struct{} `locationName:"DeleteObjectRequest" type:"structure"` + // The name of the bucket containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key name of the object to delete. + // + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -10611,6 +14060,20 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -10658,9 +14121,20 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { type DeleteObjectTaggingInput struct { _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + // The bucket name containing the objects from which to remove the tags. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key that identifies the object from which to remove the tags. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10725,6 +14199,20 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -10751,25 +14239,39 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO type DeleteObjectsInput struct { _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + // The bucket name containing the objects to delete. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name.
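// Illustrative sketch of the MFA requirement documented above: permanently
// deleting a specific version from a bucket with MFA delete enabled needs the
// device serial number and current token code in the x-amz-mfa header. All
// values are hypothetical; assumes `svc *s3.S3`, plus the aws and s3 imports.
func deleteVersionWithMFA(svc *s3.S3, bucket, key, versionID string) error {
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
		// Format: "<device serial number> <token code>", per the MFA field doc.
		MFA: aws.String("arn:aws:iam::123456789012:mfa/example-user 123456"),
	})
	return err
}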
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies whether you want to delete this object even if it has a Governance-type - // object lock in place. You must have sufficient permissions to perform this + // Object Lock in place. You must have sufficient permissions to perform this // operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Container for the request. + // // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -10844,11 +14346,29 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` + // Container element for a successful delete. It identifies the object that + // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` + // Container for a failed delete operation that describes the object that Amazon + // S3 attempted to delete and the error it encountered.
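// Illustrative sketch of a batch delete using the Delete container documented
// above, then splitting the response into its Deleted and Errors elements.
// Assumes `svc *s3.S3` and imports "fmt", "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/s3".
func batchDelete(svc *s3.S3, bucket string, keys []string) error {
	ids := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String(bucket),
		Delete: &s3.Delete{Objects: ids},
	})
	if err != nil {
		return err // the whole request failed
	}
	for _, d := range out.Deleted {
		fmt.Println("deleted:", aws.StringValue(d.Key))
	}
	for _, e := range out.Errors { // per-object failures; see the Error type below
		fmt.Println("failed:", aws.StringValue(e.Key), aws.StringValue(e.Code))
	}
	return nil
}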
Errors []*Error `locationName:"Error" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the @@ -10932,6 +14452,20 @@ func (s *DeletePublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -10946,15 +14480,24 @@ func (s DeletePublicAccessBlockOutput) GoString() string { return s.String() } +// Information about the deleted object. type DeletedObject struct { _ struct{} `type:"structure"` + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. In a simple DELETE, this header indicates + // whether (true) or not (false) a delete marker was created. DeleteMarker *bool `type:"boolean"` + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. DeleteMarkerVersionId *string `type:"string"` + // The name of the deleted object. Key *string `min:"1" type:"string"` + // The version ID of the deleted object. VersionId *string `type:"string"` } @@ -10993,7 +14536,7 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { } // Specifies information about where to publish analysis or configuration results -// for an Amazon S3 bucket. +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). type Destination struct { _ struct{} `type:"structure"` @@ -11008,17 +14551,12 @@ type Destination struct { // direct Amazon S3 to change replica ownership to the AWS account that owns // the destination bucket by specifying the AccessControlTranslation property, // this is the account ID of the destination bucket owner. For more information, - // see Cross-Region Replication Additional Configuration: Change Replica Owner - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in - // the Amazon Simple Storage Service Developer Guide. + // see Replication Additional Configuration: Changing the Replica Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon Simple Storage Service Developer Guide. Account *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to - // store replicas of the object identified by the rule. - // - // A replication configuration can replicate objects to only one destination - // bucket. If there are multiple rules in your replication configuration, all - // rules must specify the same destination bucket. + // store the results. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -11027,6 +14565,16 @@ type Destination struct { // is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // A container specifying replication metrics-related settings enabling metrics + // and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified + // together with a ReplicationTime block. 
+ Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + // The storage class to use when replicating objects, such as standard or reduced // redundancy. By default, Amazon S3 uses the storage class of the source object // to create the object replica. @@ -11058,6 +14606,16 @@ func (s *Destination) Validate() error { invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) } } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11096,19 +14654,30 @@ func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *De return s } +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Describes the server-side encryption that will be applied to the restore -// results. +// Contains the type of server-side encryption used. type Encryption struct { _ struct{} `type:"structure"` // The server-side encryption algorithm used when storing job results in Amazon - // S3 (e.g., AES256, aws:kms). + // S3 (for example, AES256, aws:kms). // // EncryptionType is a required field EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` @@ -11117,8 +14686,11 @@ type Encryption struct { // the encryption context for the restore results. KMSContext *string `type:"string"` - // If the encryption type is aws:kms, this optional value specifies the AWS - // KMS key ID to use for encryption of job results. + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric customer managed AWS KMS CMK to use for encryption of job results. + // Amazon S3 only supports symmetric CMKs. For more information, see Using Symmetric + // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. KMSKeyId *string `type:"string" sensitive:"true"` } @@ -11168,8 +14740,12 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption { type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. - // Amazon S3 uses this key to encrypt replica objects. + // Specifies the ID (Key ARN or Alias ARN) of the customer managed customer + // master key (CMK) stored in AWS Key Management Service (KMS) for the destination + // bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only + // supports symmetric customer managed CMKs. 
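// Illustrative sketch of a replication Destination using the new S3 RTC
// members above. Metrics and ReplicationTime are set together, as their docs
// require, and the 15-minute value matches the S3 RTC replication threshold.
// ARNs are hypothetical; assumes the aws and s3 imports.
func exampleRTCDestination() *s3.Destination {
	return &s3.Destination{
		Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
		EncryptionConfiguration: &s3.EncryptionConfiguration{
			// Symmetric customer managed CMK only, per the field documentation.
			ReplicaKmsKeyID: aws.String("arn:aws:kms:us-east-1:123456789012:key/example-key-id"),
		},
		ReplicationTime: &s3.ReplicationTime{
			Status: aws.String("Enabled"),
			Time:   &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
		Metrics: &s3.Metrics{
			Status:         aws.String("Enabled"),
			EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
		},
	}
}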
For more information, see Using + // Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. ReplicaKmsKeyID *string `type:"string"` } @@ -11189,6 +14765,9 @@ func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfig return s } +// A message that indicates the request is complete and no more messages will +// be sent. You should not assume that the request is complete until the client +// receives an EndEvent. type EndEvent struct { _ struct{} `locationName:"EndEvent" type:"structure"` } @@ -11215,15 +14794,380 @@ func (s *EndEvent) UnmarshalEvent( return nil } +func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. type Error struct { _ struct{} `type:"structure"` + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. + // + // Amazon S3 error codes + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your AWS account + // that prevents the operation from completing successfully. Contact AWS + // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact AWS Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all AWS + // Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. 
HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The AWS access key ID you provided + // does not exist in our records. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The operation is not valid for + // the current state of the object. 
HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact AWS Support for further assistance. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact AWS Support for more information. + // HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact AWS Support for more information. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. 
HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. 
HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional operation + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your AWS secret access + // key and signing method. 
For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Reduce your request rate. HTTP + // Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client Code *string `type:"string"` + // The error key. Key *string `min:"1" type:"string"` + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. Message *string `type:"string"` + // The version ID of the error. VersionId *string `type:"string"` } @@ -11261,6 +15205,7 @@ func (s *Error) SetVersionId(v string) *Error { return s } +// The error information. type ErrorDocument struct { _ struct{} `type:"structure"` @@ -11302,6 +15247,45 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument { return s } +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 Developer Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
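// Illustrative sketch of how the error codes cataloged above are consumed in
// Go: the SDK surfaces them as awserr.Error values whose Code() matches these
// strings, with generated constants for a subset (for example
// s3.ErrCodeNoSuchBucket). Assumes imports "fmt",
// "github.com/aws/aws-sdk-go/aws/awserr" and
// "github.com/aws/aws-sdk-go/service/s3".
func classifyS3Error(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case s3.ErrCodeNoSuchBucket: // "NoSuchBucket", 404 per the table above
			fmt.Println("bucket does not exist")
		case "SlowDown": // no generated constant; compare the raw code string
			fmt.Println("throttled: reduce request rate")
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}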
+func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + // Specifies the Amazon S3 object key name to filter on and whether to filter // on the suffix or prefix of the key name. type FilterRule struct { @@ -11388,6 +15372,20 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -11414,6 +15412,8 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA type GetBucketAclInput struct { _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + // Specifies the S3 bucket whose ACL is being requested. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11457,12 +15457,27 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` } @@ -11550,6 +15565,20 @@ func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyti return s } +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -11576,6 +15605,8 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana type GetBucketCorsInput struct { _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + // The bucket name for which to get the cors configuration. 
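// Illustrative sketch: ExistingObjectReplication carries only a required
// Status, and the Validate method above rejects a missing one before the
// request is signed. Assumes the aws and s3 imports; the status string follows
// the ExistingObjectReplicationStatus enum ("Enabled"/"Disabled").
func exampleExistingObjectReplication() error {
	eor := &s3.ExistingObjectReplication{Status: aws.String("Enabled")}
	return eor.Validate() // nil here; omitting Status yields a required-param error
}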
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11619,9 +15650,25 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` } @@ -11690,6 +15737,20 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` @@ -11775,6 +15836,20 @@ func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInvento return s } +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -11801,6 +15876,8 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv type GetBucketLifecycleConfigurationInput struct { _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11844,9 +15921,24 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11869,6 +15961,8 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge type GetBucketLifecycleInput struct { _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + // The name of the bucket for which to get the lifecycle information. 
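// Illustrative sketch of reading the CORS rule set documented above (up to 100
// rules per configuration). Assumes `svc *s3.S3`, a hypothetical bucket name,
// and imports "fmt", "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/s3".
func printCORSRules(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for _, r := range out.CORSRules {
		fmt.Println(aws.StringValueSlice(r.AllowedMethods), aws.StringValueSlice(r.AllowedOrigins))
	}
	return nil
}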
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11912,9 +16006,24 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` + // Container for a lifecycle rule. Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` } @@ -11937,6 +16046,8 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput type GetBucketLocationInput struct { _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + // The name of the bucket for which to get the location. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -11980,9 +16091,25 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -12005,6 +16132,8 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca type GetBucketLoggingInput struct { _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + // The bucket name for which to get the logging information. 
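// Illustrative sketch of the LocationConstraint semantics noted above: the
// value is empty for us-east-1 and uses legacy names such as "EU", so callers
// typically normalize it with s3.NormalizeBucketLocation from this package.
// Assumes `svc *s3.S3`, plus the aws and s3 imports.
func bucketRegion(svc *s3.S3, bucket string) (string, error) {
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	return s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)), nil
}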
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12048,6 +16177,20 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -12136,6 +16279,20 @@ func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsCo return s } +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -12162,7 +16319,7 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics type GetBucketNotificationConfigurationRequest struct { _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - // Name of the bucket to get the notification configuration for. + // Name of the bucket for which to get the notification configuration. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12207,9 +16364,25 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyInput struct { _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + // The bucket name for which to get the bucket policy.
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12253,6 +16426,20 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -12324,6 +16511,20 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` @@ -12350,6 +16551,8 @@ func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucke type GetBucketReplicationInput struct { _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + // The bucket name for which to get the replication information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12393,6 +16596,20 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -12420,6 +16637,8 @@ func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationC type GetBucketRequestPaymentInput struct { _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + // The name of the bucket for which to get the payment request configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12463,6 +16682,20 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -12489,6 +16722,8 @@ func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaym type GetBucketTaggingInput struct { _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + // The name of the bucket for which to get the tagging information.
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12532,9 +16767,25 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` } @@ -12558,6 +16809,8 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { type GetBucketVersioningInput struct { _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + // The name of the bucket for which to get the versioning information. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12601,6 +16854,20 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -12638,6 +16905,8 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp type GetBucketWebsiteInput struct { _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + // The bucket name for which to get the website configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -12681,17 +16950,34 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` // Specifies the redirect behavior of all requests to a website endpoint of // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -12732,16 +17018,28 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb type GetObjectAclInput struct { _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + // The bucket name that contains the object for which to get the ACL information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key of the object for which to get the ACL information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -12811,12 +17109,27 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. Owner *Owner `type:"structure"` // If present, indicates that the requester was successfully charged for the @@ -12855,6 +17168,15 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { type GetObjectInput struct { _ struct{} `locationName:"GetObjectRequest" type:"structure"` + // The bucket name containing the object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12874,6 +17196,8 @@ type GetObjectInput struct { // otherwise return a 412 (precondition failed).
IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // Key of the object to get. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -12883,13 +17207,14 @@ type GetObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Sets the Cache-Control header of the response. @@ -12910,19 +17235,20 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object.
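The getEndpointARN/hasEndpointARN accessors generated throughout this file are what let this SDK release accept an S3 access point ARN anywhere a bucket name is expected. A minimal sketch of how a caller would exercise that path through GetObject follows; it is not part of the diff, the region, account ID, access point name, and object key are hypothetical placeholders, and it assumes an aws-sdk-go v1 release with access point support (v1.29.x):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// A session in the region that hosts the access point.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// Pass an access point ARN where a bucket name is normally expected;
	// the hasEndpointARN/getEndpointARN accessors above let the SDK detect
	// the ARN (via arn.IsARN) and route the request to the access point
	// endpoint instead of the usual bucket endpoint.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:    aws.String("example-object.txt"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	fmt.Println("content type:", aws.StringValue(out.ContentType))
}

A plain bucket name fails the arn.IsARN check and continues down the existing virtual-host/path-style code path, so callers such as rclone's S3 backend should be unaffected unless they opt in by passing an ARN.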
@@ -13089,10 +17415,32 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { return s } +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldInput struct { _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` - // The bucket containing the object whose Legal Hold status you want to retrieve. + // The bucket name containing the object whose Legal Hold status you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13102,10 +17450,11 @@ type GetObjectLegalHoldInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object whose Legal Hold status you want to retrieve. @@ -13175,6 +17524,20 @@ func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInpu return s } +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` @@ -13201,7 +17564,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje type GetObjectLockConfigurationInput struct { _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` - // The bucket whose object lock configuration you want to retrieve. + // The bucket whose Object Lock configuration you want to retrieve.
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13246,10 +17609,24 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` - // The specified bucket's object lock configuration. + // The specified bucket's Object Lock configuration. ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` } @@ -13272,6 +17649,7 @@ func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectL type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Object data. @@ -13305,11 +17683,11 @@ type GetObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -13321,6 +17699,10 @@ type GetObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default, unmarshaled keys are written as map keys in the following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -13333,15 +17715,17 @@ type GetObjectOutput struct { // returned if you have permission to view an object's legal hold status. ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode currently in place for this object. + // The Object Lock mode currently in place for this object. ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's object lock will expire. + // The date and time when this object's Object Lock will expire. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has.
PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the @@ -13358,18 +17742,21 @@ type GetObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for Standard storage class objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -13583,7 +17970,15 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput type GetObjectRetentionInput struct { _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` - // The bucket containing the object whose retention settings you want to retrieve. + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13593,10 +17988,11 @@ type GetObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests.
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID for the object whose retention settings you want to retrieve. @@ -13666,6 +18062,20 @@ func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInpu return s } +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectRetentionOutput struct { _ struct{} `type:"structure" payload:"Retention"` @@ -13692,12 +18102,24 @@ func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObje type GetObjectTaggingInput struct { _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + // The bucket name containing the object for which to get the tagging information. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which to get the tagging information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The versionId of the object for which to get the tagging information. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -13758,12 +18180,29 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` + // Contains the tag set. + // // TagSet is a required field TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + // The versionId of the object for which you got the tagging information.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -13792,16 +18231,22 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput type GetObjectTorrentInput struct { _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + // The name of the bucket containing the object for which to get the torrent + // files. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The object key for which to get the information. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` } @@ -13862,9 +18307,24 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` + // A Bencoded dictionary as defined by the BitTorrent specification. Body io.ReadCloser `type:"blob"` // If present, indicates that the requester was successfully charged for the @@ -13943,6 +18403,20 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` @@ -13967,6 +18441,7 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public return s } +// Container for Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -14005,9 +18480,11 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } +// Container for grant information. type Grant struct { _ struct{} `type:"structure"` + // The person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. @@ -14051,6 +18528,7 @@ func (s *Grant) SetPermission(v string) *Grant { return s } +// Container for the person being granted permissions.
type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -14128,6 +18606,8 @@ func (s *Grantee) SetURI(v string) *Grantee { type HeadBucketInput struct { _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -14171,6 +18651,20 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } +func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -14188,6 +18682,8 @@ func (s HeadBucketOutput) GoString() string { type HeadObjectInput struct { _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + // The name of the bucket containing the object. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14207,6 +18703,8 @@ type HeadObjectInput struct { // otherwise return a 412 (precondition failed). IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` + // The object key. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -14217,28 +18715,30 @@ type HeadObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. Range *string `location:"header" locationName:"Range" type:"string"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key.
The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // VersionId used to reference a specific version of the object. @@ -14369,9 +18869,24 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` + // Indicates that a range of bytes was specified. AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` // Specifies caching behavior along the request/reply chain. @@ -14399,11 +18914,11 @@ type HeadObjectOutput struct { DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL + // of a resource found at a URL. ETag *string `location:"header" locationName:"ETag" type:"string"` // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs + // includes this header. It includes the expiry-date and rule-id key-value pairs // providing object expiration information. The value of the rule-id is URL // encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` @@ -14415,6 +18930,10 @@ type HeadObjectOutput struct { LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. + // + // By default, unmarshaled keys are written as map keys in the following canonicalized format: + // the first letter and any letter following a hyphen will be capitalized, and the rest as lowercase. + // Set `aws.Config.LowerCaseHeaderMaps` to `true` to write unmarshaled keys to the map as lowercase. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` // This is set to the number of metadata entries not returned in x-amz-meta @@ -14423,26 +18942,69 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - // The Legal Hold status for the specified object. + // Specifies whether a legal hold is in effect for this object. This header + // is only returned if the requester has the s3:GetObjectLegalHold permission.
+ // This header is not returned if the specified version of this object has never + // had a legal hold applied. For more information about S3 Object Lock, see + // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` - // The object lock mode currently in place for this object. + // The Object Lock mode, if any, that's in effect for this object. This header + // is only returned if the requester has the s3:GetObjectRetention permission. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` - // The date and time when this object's object lock expires. + // The date and time when the Object Lock retention period expires. This header + // is only returned if the requester has the s3:GetObjectRetention permission. ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + // Amazon S3 can return this header if your request involves a bucket that is + // either a source or destination in a replication rule. + // + // In replication, you have a source bucket on which you configure replication + // and a destination bucket where Amazon S3 stores object replicas. When you request + // an object (GetObject) or object metadata (HeadObject) from these buckets, + // Amazon S3 will return the x-amz-replication-status header in the response + // as follows: + // + // * If requesting an object from the source bucket — Amazon S3 will return + // the x-amz-replication-status header if the object in your request is eligible + // for replication. For example, suppose that in your replication configuration, + // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects + // with key prefix TaxDocs. Any objects you upload with this key name prefix, + // for example TaxDocs/document1.pdf, are eligible for replication. For any + // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status + // header with value PENDING, COMPLETED, or FAILED indicating object replication + // status. + // + // * If requesting an object from the destination bucket — Amazon S3 will + // return the x-amz-replication-status header with value REPLICA if the object + // in your request is a replica that Amazon S3 created. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html). ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time - // of the restored object copy.
+ // If the object is an archived object (an object whose storage class is GLACIER), + // the response includes this header if either the archive restoration is in + // progress (see RestoreObject) or an archive copy is already restored. + // + // If an archive copy is already restored, the header value indicates when Amazon + // S3 is scheduled to delete the object copy. For example: + // + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // GMT" + // + // If the object restoration is in progress, the header returns the value ongoing-request="true". + // + // For more information about archiving objects, see Transitioning Objects: + // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, @@ -14451,18 +19013,25 @@ type HeadObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If the object is stored using server-side encryption either with an AWS KMS + // customer master key (CMK) or an Amazon S3-managed encryption key, the response + // includes this header with the value of the server-side encryption algorithm + // used when storing this object in Amazon S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // Version of the object. @@ -14652,13 +19221,15 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } +// Container for the Suffix element. type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g.
if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. + // endpoint (for example, if the suffix is index.html and you make a request + // to samplebucket/images/ the data that is returned will be for the object + // with the key name images/index.html). The suffix must not be empty and must + // not include a slash character. // // Suffix is a required field Suffix *string `type:"string" required:"true"` @@ -14693,6 +19264,7 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } +// Container element that identifies who initiated the multipart upload. type Initiator struct { _ struct{} `type:"structure"` @@ -14913,6 +19485,7 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } +// Specifies the inventory configuration for an Amazon S3 bucket. type InventoryDestination struct { _ struct{} `type:"structure"` @@ -14962,10 +19535,10 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin type InventoryEncryption struct { _ struct{} `type:"structure"` - // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` - // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` } @@ -15006,6 +19579,8 @@ func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { return s } +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. type InventoryFilter struct { _ struct{} `type:"structure"` @@ -15044,13 +19619,15 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` // The ID of the account that owns the destination bucket. AccountId *string `type:"string"` - // The Amazon resource name (ARN) of the bucket where inventory results will + // The Amazon Resource Name (ARN) of the bucket where inventory results will // be published. // // Bucket is a required field @@ -15137,6 +19714,7 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } +// Specifies the schedule for generating inventory results. type InventorySchedule struct { _ struct{} `type:"structure"` @@ -15175,6 +19753,7 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } +// Specifies JSON as the object's input serialization format. type JSONInput struct { _ struct{} `type:"structure"` @@ -15198,6 +19777,7 @@ func (s *JSONInput) SetType(v string) *JSONInput { return s } +// Specifies JSON as the request's output serialization format. type JSONOutput struct { _ struct{} `type:"structure"` @@ -15225,7 +19805,7 @@ func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { type KeyFilter struct { _ struct{} `type:"structure"` - // A list of containers for the key value pair that defines the criteria for + // A list of containers for the key-value pair that defines the criteria for
FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` } @@ -15323,9 +19903,12 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc return s } +// Container for lifecycle rules. You can add as many as 1000 rules. type LifecycleConfiguration struct { _ struct{} `type:"structure"` + // Specifies lifecycle configuration rules for an Amazon S3 bucket. + // // Rules is a required field Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -15369,6 +19952,7 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { return s } +// Container for the expiration for the lifecycle of the object. type LifecycleExpiration struct { _ struct{} `type:"structure"` @@ -15415,6 +19999,7 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp return s } +// A lifecycle rule for individual objects in an Amazon S3 bucket. type LifecycleRule struct { _ struct{} `type:"structure"` @@ -15425,6 +20010,8 @@ type LifecycleRule struct { // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. Expiration *LifecycleExpiration `type:"structure"` // The Filter is used to identify objects that a Lifecycle Rule applies to. @@ -15441,6 +20028,11 @@ type LifecycleRule struct { // period in the object's lifetime. NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` // Prefix identifying one or more objects to which the rule applies. This is @@ -15455,6 +20047,7 @@ type LifecycleRule struct { // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // Specifies when an Amazon S3 object transitions to a specified storage class. Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` } @@ -15546,6 +20139,7 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { type LifecycleRuleAndOperator struct { _ struct{} `type:"structure"` + // Prefix identifying one or more objects to which the rule applies. Prefix *string `type:"string"` // All of these tags must exist in the object's tag set in order for the rule @@ -15718,13 +20312,28 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` // The list of analytics configurations for a bucket. 
AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` - // The ContinuationToken that represents where this request began. + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. ContinuationToken *string `type:"string"` // Indicates whether the returned list of analytics configurations is complete. @@ -15832,6 +20441,20 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -15842,8 +20465,9 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` - // Indicates whether the returned list of inventory configurations is truncated - // in this response. A value of true indicates that the list is truncated. + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. IsTruncated *bool `type:"boolean"` // The marker used to continue this inventory configuration listing. Use the @@ -15946,6 +20570,20 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -16019,8 +20657,10 @@ func (s ListBucketsInput) GoString() string { type ListBucketsOutput struct { _ struct{} `type:"structure"` + // The list of buckets owned by the requester. Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + // The owner of the buckets listed. Owner *Owner `type:"structure"` } @@ -16049,10 +20689,26 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } type ListMultipartUploadsInput struct { _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide.
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -16065,6 +20721,13 @@ type ListMultipartUploadsInput struct { // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. + // + // If upload-id-marker is not specified, only the keys lexicographically greater + // than the specified key-marker will be included in the list. + // + // If upload-id-marker is specified, any multipart uploads for a key equal to + // the key-marker might also be included, provided those multipart uploads have + // upload IDs lexicographically greater than the specified upload-id-marker. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of multipart uploads, from 1 to 1,000, to return @@ -16073,12 +20736,16 @@ type ListMultipartUploadsInput struct { MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` // Lists in-progress uploads only for those keys that begin with the specified - // prefix. + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` } @@ -16157,17 +20824,42 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Contains the delimiter you specified in the request. 
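// Editor's aside (not part of the generated SDK code): the key-marker and
// upload-id-marker parameters documented above are easiest to drive through
// the SDK's paginator, which feeds NextKeyMarker and NextUploadIdMarker back
// into each follow-up request. A sketch, assuming an existing *s3.S3 client
// named svc and a hypothetical bucket:
//
//	err := svc.ListMultipartUploadsPages(&s3.ListMultipartUploadsInput{
//		Bucket: aws.String("example-bucket"), // hypothetical
//	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
//		for _, up := range page.Uploads {
//			fmt.Println(*up.Key, *up.UploadId)
//		}
//		return true // keep paging until IsTruncated is false
//	})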
If you don't specify + // a delimiter in your request, this element is absent from the response. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. EncodingType *string `type:"string" enum:"EncodingType"` // Indicates whether the returned list of multipart uploads is truncated. A @@ -16198,6 +20890,8 @@ type ListMultipartUploadsOutput struct { // Upload ID after which listing began. UploadIdMarker *string `type:"string"` + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` } @@ -16293,10 +20987,23 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti type ListObjectVersionsInput struct { _ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"` + // The bucket name that contains the objects. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A delimiter is a character you use to group keys. + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes. These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` // Requests Amazon S3 to encode the object keys in the response and specifies @@ -16311,10 +21018,17 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // contain fewer keys but will never contain more. If additional keys satisfy + // the search criteria, but were not returned because max-keys was exceeded, + // the response contains true. To return the additional + // keys, see key-marker and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - // Limits the response to keys that begin with the specified prefix. + // Use this parameter to select only those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different groupings + // of keys. (You can think of using prefix to make groups in the same way you'd + // use a folder in a file system.) 
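// Editor's aside (not part of the generated SDK code): ListObjectVersions'
// key-marker / version-id-marker pagination is wrapped by
// ListObjectVersionsPages. A sketch, assuming an existing *s3.S3 client named
// svc and a hypothetical bucket:
//
//	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
//		Bucket: aws.String("example-bucket"), // hypothetical
//	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
//		for _, v := range page.Versions {
//			fmt.Println(*v.Key, *v.VersionId, aws.BoolValue(v.IsLatest))
//		}
//		return true
//	})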
You can use prefix with delimiter to roll + // up numerous objects into a single result under CommonPrefixes. Prefix *string `location:"querystring" locationName:"prefix" type:"string"` // Specifies the object version you want to start listing from. @@ -16396,42 +21110,81 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Container for an object that is a delete marker. DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify encoding-type request parameter, Amazon S3 includes this element + // in the response, and returns encoded key name values in the following response + // elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the // rest of the results. IsTruncated *bool `type:"boolean"` - // Marks the last Key returned in a truncated response. + // Marks the last key returned in a truncated response. KeyMarker *string `type:"string"` + // Specifies the maximum number of objects to return. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` - // Use this value for the key marker request parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. NextKeyMarker *string `type:"string"` - // Use this value for the next version id marker parameter in a subsequent request. + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. 
Use this value for the version-id-marker request parameter in a + // subsequent request. NextVersionIdMarker *string `type:"string"` + // Selects objects that start with the value supplied by this parameter. Prefix *string `type:"string"` + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string `type:"string"` + // Container for version information. Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` } @@ -16526,6 +21279,8 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe type ListObjectsInput struct { _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + // The name of the bucket containing the objects. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -16631,26 +21386,65 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` + // All of the keys rolled up in a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. IsTruncated *bool `type:"boolean"` + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. Marker *string `type:"string"` + // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` + // Bucket name. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response @@ -16662,6 +21456,7 @@ type ListObjectsOutput struct { // subsequent request to get the next set of object keys. 
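// Editor's aside (not part of the generated SDK code): for the V1 ListObjects
// API, Marker/NextMarker pagination is wrapped by ListObjectsPages. A sketch,
// assuming an existing *s3.S3 client named svc and a hypothetical bucket:
//
//	err := svc.ListObjectsPages(&s3.ListObjectsInput{
//		Bucket: aws.String("example-bucket"), // hypothetical
//	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
//		for _, obj := range page.Contents {
//			fmt.Println(*obj.Key, *obj.Size)
//		}
//		return true // continue until the listing is no longer truncated
//	})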
NextMarker *string `type:"string"` + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` } @@ -16738,14 +21533,21 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { type ListObjectsV2Input struct { _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` - // Name of the bucket to list. + // Bucket name to list. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // ContinuationToken indicates Amazon S3 that the list is being continued on // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // key. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` // A delimiter is a character you use to group keys. @@ -16756,7 +21558,7 @@ type ListObjectsV2Input struct { // The owner field is not present in listV2 by default, if you want to return // owner field with each key in the result then set the fetch owner field to - // true + // true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` // Sets the maximum number of keys returned in the response. The response might @@ -16772,7 +21574,7 @@ type ListObjectsV2Input struct { RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // listing after this specified key. StartAfter can be any key in the bucket. StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` } @@ -16863,29 +21665,65 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListObjectsV2Output struct { _ struct{} `type:"structure"` + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // // CommonPrefixes contains all (if there are any) keys between Prefix and the - // next occurrence of the string specified by delimiter + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. 
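// Editor's aside (not part of the generated SDK code): the notes/summer/july
// example above translates directly into a delimiter-based listing. A sketch,
// assuming an existing *s3.S3 client named svc and a hypothetical bucket:
//
//	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
//		Bucket:    aws.String("example-bucket"), // hypothetical
//		Prefix:    aws.String("notes/"),
//		Delimiter: aws.String("/"),
//	})
//	if err == nil {
//		for _, cp := range out.CommonPrefixes {
//			fmt.Println(*cp.Prefix) // e.g. "notes/summer/"
//		}
//	}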
CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` // Metadata about each object returned. Contents []*Object `type:"list" flattened:"true"` - // ContinuationToken indicates Amazon S3 that the list is being continued on - // this bucket with a token. ContinuationToken is obfuscated and is not a real - // key + // If ContinuationToken was sent with the request, it is included in the response. ContinuationToken *string `type:"string"` - // A delimiter is a character you use to group keys. + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. Delimiter *string `type:"string"` - // Encoding type used by Amazon S3 to encode object keys in the response. + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. EncodingType *string `type:"string" enum:"EncodingType"` - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. IsTruncated *bool `type:"boolean"` // KeyCount is the number of keys returned with this request. KeyCount will @@ -16897,20 +21735,26 @@ type ListObjectsV2Output struct { // contain fewer keys but will never contain more. MaxKeys *int64 `type:"integer"` - // Name of the bucket to list. + // Bucket name. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string"` - // NextContinuationToken is sent when isTruncated is true which means there + // NextContinuationToken is sent when isTruncated is true, which means there // are more keys in the bucket that can be listed. The next list requests to // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken // is obfuscated and is not a real key NextContinuationToken *string `type:"string"` - // Limits the response to keys that begin with the specified prefix. + // Keys that begin with the indicated prefix. Prefix *string `type:"string"` - // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket + // If StartAfter was sent with the request, it is included in the response. 
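// Editor's aside (not part of the generated SDK code): ContinuationToken /
// NextContinuationToken pagination is handled by ListObjectsV2Pages, which
// resubmits the token until the listing is complete. A sketch, assuming an
// existing *s3.S3 client named svc and a hypothetical bucket:
//
//	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
//		Bucket:  aws.String("example-bucket"), // hypothetical
//		MaxKeys: aws.Int64(1000),
//	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
//		fmt.Println("keys in page:", aws.Int64Value(page.KeyCount))
//		return true // paginator stops once IsTruncated is false
//	})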
StartAfter *string `type:"string"` } @@ -16999,9 +21843,20 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { type ListPartsInput struct { _ struct{} `locationName:"ListPartsRequest" type:"structure"` + // Name of the bucket to which the parts are being uploaded. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -17012,10 +21867,11 @@ type ListPartsInput struct { // part numbers will be listed. PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -17102,23 +21958,51 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type ListPartsOutput struct { _ struct{} `type:"structure"` - // Date when multipart upload will become eligible for abort operation by lifecycle. + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. 
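// Editor's aside (not part of the generated SDK code): part-number-marker
// pagination for ListParts is wrapped by ListPartsPages. A sketch, assuming an
// existing *s3.S3 client named svc and hypothetical bucket, key, and upload ID
// values:
//
//	err := svc.ListPartsPages(&s3.ListPartsInput{
//		Bucket:   aws.String("example-bucket"),    // hypothetical
//		Key:      aws.String("big/object.bin"),    // hypothetical
//		UploadId: aws.String("example-upload-id"), // hypothetical
//	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
//		for _, p := range page.Parts {
//			fmt.Println(*p.PartNumber, *p.ETag)
//		}
//		return true
//	})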
AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` - // Id of the lifecycle rule that makes a multipart upload eligible for abort - // operation. + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` // Name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` - // Identifies who initiated the multipart upload. + // Container element that identifies who initiated the multipart upload. If + // the initiator is an AWS account, this element provides the same information + // as the Owner element. If the initiator is an IAM User, this element provides + // the user ARN and display name. Initiator *Initiator `type:"structure"` - // Indicates whether the returned list of parts is truncated. + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. IsTruncated *bool `type:"boolean"` // Object key for which the multipart upload was initiated. @@ -17132,18 +22016,26 @@ type ListPartsOutput struct { // in a subsequent request. NextPartNumberMarker *int64 `type:"integer"` + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. Owner *Owner `type:"structure"` - // Part number after which listing begins. + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. PartNumberMarker *int64 `type:"integer"` + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []*Part `locationName:"Part" type:"list" flattened:"true"` // If present, indicates that the requester was successfully charged for the // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // The class of storage used to store the object. + // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded + // object. StorageClass *string `type:"string" enum:"StorageClass"` // Upload ID identifying the multipart upload whose parts are being listed. @@ -17251,7 +22143,8 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Describes an S3 location that will receive the results of the restore request. +// Describes an Amazon S3 location that will receive the results of the restore +// request. type Location struct { _ struct{} `type:"structure"` @@ -17266,8 +22159,7 @@ type Location struct { // The canned ACL to apply to the restore results. CannedACL *string `type:"string" enum:"ObjectCannedACL"` - // Describes the server-side encryption that will be applied to the restore - // results. + // Contains the type of server-side encryption used. Encryption *Encryption `type:"structure"` // The prefix that is prepended to the restore results for this request. @@ -17389,13 +22281,14 @@ type LoggingEnabled struct { // Specifies the bucket where you want Amazon S3 to store server access logs. 
// You can have your logs delivered to any bucket that you own, including the // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should + // to deliver their logs to the same target bucket. In this case, you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. // // TargetBucket is a required field TargetBucket *string `type:"string" required:"true"` + // Container for granting information. TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` // A prefix for all log object keys. If you store log files from multiple Amazon @@ -17464,8 +22357,10 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { type MetadataEntry struct { _ struct{} `type:"structure"` + // Name of the Object. Name *string `type:"string"` + // Value of the Object. Value *string `type:"string"` } @@ -17491,6 +22386,65 @@ func (s *MetadataEntry) SetValue(v string) *MetadataEntry { return s } +// A container specifying replication metrics-related settings enabling metrics +// and Amazon S3 events for S3 Replication Time Control (S3 RTC). Must be specified +// together with a ReplicationTime block. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + // + // EventThreshold is a required field + EventThreshold *ReplicationTimeValue `type:"structure" required:"true"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.EventThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("EventThreshold")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -17604,6 +22558,9 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// a tag, or a conjunction (MetricsAndOperator). type MetricsFilter struct { _ struct{} `type:"structure"` @@ -17667,6 +22624,7 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } +// Container for the MultipartUpload for the Amazon S3 object. 
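// Editor's aside (not part of the generated SDK code): the new Metrics type
// introduced above pairs with a ReplicationTime block for S3 Replication Time
// Control (S3 RTC). A sketch of constructing and validating one, assuming the
// aws and s3 packages:
//
//	m := &s3.Metrics{
//		Status:         aws.String(s3.MetricsStatusEnabled),
//		EventThreshold: &s3.ReplicationTimeValue{Minutes: aws.Int64(15)},
//	}
//	if err := m.Validate(); err != nil {
//		fmt.Println("invalid metrics block:", err) // both fields are required
//	}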
type MultipartUpload struct { _ struct{} `type:"structure"` @@ -17679,6 +22637,7 @@ type MultipartUpload struct { // Key of the object for which the multipart upload was initiated. Key *string `min:"1" type:"string"` + // Specifies the owner of the object that is part of the multipart upload. Owner *Owner `type:"structure"` // The class of storage used to store the object. @@ -17778,8 +22737,8 @@ type NoncurrentVersionTransition struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` @@ -17898,10 +22857,17 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` + // Container for specifying the AWS Lambda notification configuration. CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` } @@ -17959,17 +22925,25 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } +// An object consists of data and its descriptive metadata. type Object struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of the object. ETag reflects only changes to + // the contents of an object, not its metadata. ETag *string `type:"string"` + // The name that you assign to an object. You use the object key to retrieve + // the object. Key *string `min:"1" type:"string"` + // The date the Object was Last Modified LastModified *time.Time `type:"timestamp"` + // The owner of the object Owner *Owner `type:"structure"` + // Size in bytes of the object Size *int64 `type:"integer"` // The class of storage used to store the object. @@ -18022,6 +22996,7 @@ func (s *Object) SetStorageClass(v string) *Object { return s } +// Object Identifier is unique value to identify objects. type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -18072,14 +23047,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// The container element for object lock configuration parameters. +// The container element for Object Lock configuration parameters. type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an object lock configuration enabled. + // Indicates whether this bucket has an Object Lock configuration enabled. ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // The object lock rule in place for the specified object. 
+ // The Object Lock rule in place for the specified object. Rule *ObjectLockRule `type:"structure"` } @@ -18136,7 +23111,7 @@ type ObjectLockRetention struct { // Indicates the Retention mode for the specified object. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` - // The date on which this object lock retention expires. + // The date on which this Object Lock Retention will expire. RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } @@ -18162,7 +23137,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti return s } -// The container element for an object lock rule. +// The container element for an Object Lock rule. type ObjectLockRule struct { _ struct{} `type:"structure"` @@ -18187,9 +23162,11 @@ func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRul return s } +// The version of an object. type ObjectVersion struct { _ struct{} `type:"structure"` + // The entity tag is an MD5 hash of that version of the object. ETag *string `type:"string"` // Specifies whether the object is (true) or is not (false) the latest version @@ -18202,6 +23179,7 @@ type ObjectVersion struct { // Date and time the object was last modified. LastModified *time.Time `type:"timestamp"` + // Specifies the owner of the object. Owner *Owner `type:"structure"` // Size in bytes of the object. @@ -18344,11 +23322,14 @@ func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { return s } +// Container for the owner's display name and ID. type Owner struct { _ struct{} `type:"structure"` + // Container for the display name of the owner. DisplayName *string `type:"string"` + // Container for the ID of the owner. ID *string `type:"string"` } @@ -18374,6 +23355,7 @@ func (s *Owner) SetID(v string) *Owner { return s } +// Container for Parquet. type ParquetInput struct { _ struct{} `type:"structure"` } @@ -18388,6 +23370,7 @@ func (s ParquetInput) GoString() string { return s.String() } +// Container for elements related to a part. type Part struct { _ struct{} `type:"structure"` @@ -18464,6 +23447,7 @@ func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { return s } +// This data type contains information about progress of an operation. type Progress struct { _ struct{} `type:"structure"` @@ -18505,6 +23489,7 @@ func (s *Progress) SetBytesScanned(v int64) *Progress { return s } +// This data type contains information about the progress event of an operation. type ProgressEvent struct { _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` @@ -18545,7 +23530,21 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } -// Specifies the Block Public Access configuration for an Amazon S3 bucket. +func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. 
For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -18558,6 +23557,8 @@ type PublicAccessBlockConfiguration struct { // // * PUT Object calls fail if the request includes a public ACL. // + // * PUT Bucket calls fail if the request includes a public ACL. + // // Enabling this setting doesn't affect existing policies or ACLs. BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` @@ -18624,7 +23625,7 @@ func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *Publi type PutBucketAccelerateConfigurationInput struct { _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` - // Specifies the Accelerate Configuration you want to set for the bucket. + // Container for setting the transfer acceleration state. // // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -18683,6 +23684,20 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18706,6 +23721,8 @@ type PutBucketAclInput struct { // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket to which to apply the ACL. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -18812,6 +23829,20 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -18907,6 +23938,20 @@ func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyti return s } +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -18924,12 +23969,14 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { type PutBucketCorsInput struct { _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + // Specifies the bucket impacted by the corsconfiguration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // (https://docs.aws.amazon.com/AmazonS3/latest/dev//cors.html) in the Amazon // Simple Storage Service Developer Guide. // // CORSConfiguration is a required field @@ -18989,6 +24036,20 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -19007,9 +24068,9 @@ type PutBucketEncryptionInput struct { _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` // Specifies default encryption for a bucket using server-side encryption with - // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information - // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket - // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS + // (SSE-KMS). For information about the Amazon S3 default encryption feature, + // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon Simple Storage Service Developer Guide. 
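// Editor's aside (not part of the generated SDK code): a minimal SSE-S3
// default-encryption request matching the description above, assuming an
// existing *s3.S3 client named svc and a hypothetical bucket:
//
//	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
//		Bucket: aws.String("example-bucket"), // hypothetical
//		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
//			Rules: []*s3.ServerSideEncryptionRule{{
//				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
//					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
//				},
//			}},
//		},
//	})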
// // Bucket is a required field @@ -19074,6 +24135,20 @@ func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *Serve return s } +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -19169,6 +24244,20 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19186,12 +24275,12 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { type PutBucketLifecycleConfigurationInput struct { _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + // The name of the bucket for which to set the configuration. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. - // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) - // in the Amazon Simple Storage Service Developer Guide. + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19245,6 +24334,20 @@ func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *Buck return s } +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19265,6 +24368,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for lifecycle rules. You can add as many as 1000 rules. 
LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19318,6 +24422,20 @@ func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfigur return s } +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -19335,9 +24453,13 @@ func (s PutBucketLifecycleOutput) GoString() string { type PutBucketLoggingInput struct { _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + // The name of the bucket for which to set the logging parameters. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for logging status information. + // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19395,6 +24517,20 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -19490,6 +24626,20 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19507,6 +24657,8 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { type PutBucketNotificationConfigurationInput struct { _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19570,6 +24722,20 @@ func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v return s } +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -19587,9 +24753,13 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { type PutBucketNotificationInput struct { _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The container for the configuration. + // // NotificationConfiguration is a required field NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19642,6 +24812,20 @@ func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *Notificatio return s } +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -19659,6 +24843,8 @@ func (s PutBucketNotificationOutput) GoString() string { type PutBucketPolicyInput struct { _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + // The name of the bucket. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19726,6 +24912,20 @@ func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -19743,6 +24943,8 @@ func (s PutBucketPolicyOutput) GoString() string { type PutBucketReplicationInput struct { _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + // The name of the bucket + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19752,7 +24954,6 @@ type PutBucketReplicationInput struct { // ReplicationConfiguration is a required field ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // A token that allows Amazon S3 object lock to be enabled for an existing bucket. 
Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -19815,6 +25016,20 @@ func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInpu return s } +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -19832,9 +25047,13 @@ func (s PutBucketReplicationOutput) GoString() string { type PutBucketRequestPaymentInput struct { _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for Payer. + // // RequestPaymentConfiguration is a required field RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19892,6 +25111,20 @@ func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *Request return s } +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -19909,9 +25142,13 @@ func (s PutBucketRequestPaymentOutput) GoString() string { type PutBucketTaggingInput struct { _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Container for the TagSet and Tag elements. + // // Tagging is a required field Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -19969,6 +25206,20 @@ func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -19986,6 +25237,8 @@ func (s PutBucketTaggingOutput) GoString() string { type PutBucketVersioningInput struct { _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -19993,9 +25246,7 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - // Describes the versioning state of an Amazon S3 bucket. 
For more information, - // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) - // in the Amazon Simple Storage Service API Reference. + // Container for setting the versioning state. // // VersioningConfiguration is a required field VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -20055,6 +25306,20 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -20072,10 +25337,12 @@ func (s PutBucketVersioningOutput) GoString() string { type PutBucketWebsiteInput struct { _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + // The bucket name. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies website configuration parameters for an Amazon S3 bucket. + // Container for the request. // // WebsiteConfiguration is a required field WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -20134,6 +25401,20 @@ func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) return s } +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -20151,12 +25432,23 @@ func (s PutBucketWebsiteOutput) GoString() string { type PutObjectAclInput struct { _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. 
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20176,13 +25468,16 @@ type PutObjectAclInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + // Key for which the PUT operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // VersionId used to reference a specific version of the object. @@ -20299,6 +25594,20 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -20326,44 +25635,62 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { type PutObjectInput struct { _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` - // The canned ACL to apply to the object. + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. Body io.ReadSeeker `type:"blob"` - // Name of the bucket to which the PUT operation was initiated. + // Bucket name to which the PUT operation was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Specifies caching behavior along the request/reply chain. 
+ // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - // Specifies presentational information for the object. + // Specifies presentational information for the object. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` // Specifies what content encodings have been applied to the object and thus // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. + // by the Content-Type header field. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` // The language the content is in. ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. + // body cannot be determined automatically. For more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` - // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required - // if object lock parameters are specified. + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // A standard MIME type describing the format of the object data. + // A standard MIME type describing the format of the contents. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The date and time at which the object is no longer cacheable. + // The date and time at which the object is no longer cacheable. For more information, + // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. @@ -20386,34 +25713,37 @@ type PutObjectInput struct { // A map of metadata to store with the object in S3. 
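 	//
 	// As an illustration (the entry is invented for this sketch), the SDK sends
 	// each map entry as an x-amz-meta-* header: a value stored under the key
 	// "reviewed-by" reaches S3 as the header x-amz-meta-reviewed-by.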
 	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`

-	// The Legal Hold status that you want to apply to the specified object.
+	// Specifies whether a legal hold will be applied to this object. For more information
+	// about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
 	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`

-	// The object lock mode that you want to apply to this object.
+	// The Object Lock mode that you want to apply to this object.
 	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`

-	// The date and time when you want this object's object lock to expire.
+	// The date and time when you want this object's Object Lock to expire.
 	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`

-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
 	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
 	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`

 	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
 	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
 	// header.
 	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
 	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`

 	// Specifies the AWS KMS Encryption Context to use for object encryption. The
@@ -20421,17 +25751,24 @@ type PutObjectInput struct {
 	// encryption context key-value pairs.
 	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`

-	// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
-	// requests for an object protected by AWS KMS will fail if not made via SSL
-	// or using SigV4. Documentation on configuring any of the officially supported
-	// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+	// If x-amz-server-side-encryption is present and has the value of aws:kms,
+	// this header specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for
+	// the object.
+	//
+	// If the value of x-amz-server-side-encryption is aws:kms, this header specifies
+	// the ID of the symmetric customer managed AWS KMS CMK that will be used for
+	// the object. If you specify x-amz-server-side-encryption:aws:kms, but do not
+	// provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the AWS
+	// managed CMK in AWS to protect the data.
 	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
 	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`

-	// The type of storage to use for the object. Defaults to 'STANDARD'.
+	// If you don't specify a storage class, Standard is the default. Amazon S3
+	// supports other storage classes.
 	StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`

 	// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
@@ -20440,7 +25777,22 @@ type PutObjectInput struct {
 	// If the bucket is configured as a website, redirects requests for this object
 	// to another object in the same bucket or to an external URL. Amazon S3 stores
-	// the value of this header in the object metadata.
+	// the value of this header in the object metadata. For information about object
+	// metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html).
+	//
+	// In the following example, the request header sets the redirect to an object
+	// (anotherPage.html) in the same bucket:
+	//
+	// x-amz-website-redirect-location: /anotherPage.html
+	//
+	// In the following example, the request header sets the object redirect to
+	// another website:
+	//
+	// x-amz-website-redirect-location: http://www.example.com/
+	//
+	// For more information about website hosting in Amazon S3, see Hosting Websites
+	// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
+	// and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` } @@ -20670,10 +26022,32 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldInput struct { _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` - // The bucket containing the object that you want to place a Legal Hold on. + // The bucket name containing the object that you want to place a Legal Hold + // on. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20687,10 +26061,11 @@ type PutObjectLegalHoldInput struct { // specified object. LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // The version ID of the object that you want to place a Legal Hold on. @@ -20766,6 +26141,20 @@ func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInpu return s } +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLegalHoldOutput struct { _ struct{} `type:"structure"` @@ -20793,21 +26182,22 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo type PutObjectLockConfigurationInput struct { _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` - // The bucket whose object lock configuration you want to create or replace. 
+ // The bucket whose Object Lock configuration you want to create or replace. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The object lock configuration that you want to apply to the specified bucket. + // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // A token to allow Amazon S3 object lock to be enabled for an existing bucket. + // A token to allow Object Lock to be enabled for an existing bucket. Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } @@ -20868,6 +26258,20 @@ func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfi return s } +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` @@ -20898,8 +26302,10 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), + // the response includes this header. It includes the expiry-date and rule-id + // key-value pairs that provide information about object expiration. The value + // of the rule-id is URL encoded. Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` // If present, indicates that the requester was successfully charged for the @@ -20912,7 +26318,7 @@ type PutObjectOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. 
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` @@ -20921,12 +26327,16 @@ type PutObjectOutput struct { // the encryption context key-value pairs. SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If x-amz-server-side-encryption is present and has the value of aws:kms, + // this header specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // If you specified server-side encryption either with an AWS KMS customer master + // key (CMK) or Amazon S3-managed encryption key in your PUT request, the response + // includes this header. It confirms the encryption algorithm that Amazon S3 + // used to encrypt the object. ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Version of the object. @@ -21000,13 +26410,20 @@ func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { type PutObjectRetentionInput struct { _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"` - // The bucket that contains the object you want to apply this Object Retention + // The bucket name that contains the object you want to apply this Object Retention // configuration to. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation using an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions.j + // Indicates whether this operation should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` // The key name for the object that you want to apply this Object Retention @@ -21015,10 +26432,11 @@ type PutObjectRetentionInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
 	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

 	// The container element for the Object Retention configuration.
@@ -21104,6 +26522,20 @@ func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInpu
 	return s
 }

+func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectRetentionInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
 type PutObjectRetentionOutput struct {
 	_ struct{} `type:"structure"`

@@ -21131,15 +26563,29 @@ func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetenti
 type PutObjectTaggingInput struct {
 	_ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"`

+	// The bucket name containing the object.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation using an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name. For more information
+	// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
+	// in the Amazon Simple Storage Service Developer Guide.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

+	// Name of the object key.
+	//
 	// Key is a required field
 	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

+	// Container for the TagSet and Tag elements.
+	//
 	// Tagging is a required field
 	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

+	// The versionId of the object that the tag-set will be added to.
 	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
 }

@@ -21214,9 +26660,24 @@ func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
 	return s
 }

+func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectTaggingInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
 type PutObjectTaggingOutput struct {
 	_ struct{} `type:"structure"`

+	// The versionId of the object the tag-set was added to.
VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } @@ -21303,6 +26764,20 @@ func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicA return s } +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -21322,6 +26797,8 @@ func (s PutPublicAccessBlockOutput) GoString() string { type QueueConfiguration struct { _ struct{} `type:"structure"` + // A collection of bucket events for which to send notifications + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` @@ -21391,6 +26868,10 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } +// This data type is deprecated. Use QueueConfiguration for the same purposes. +// This data type specifies the configuration for publishing messages to an +// Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified +// events. type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -21399,12 +26880,15 @@ type QueueConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` + // A collection of bucket events for which to send notifications Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. Queue *string `type:"string"` } @@ -21442,6 +26926,7 @@ func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDep return s } +// The container for the records event. type RecordsEvent struct { _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` @@ -21481,6 +26966,13 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) + msg.Payload = s.Payload + return msg, err +} + // Specifies how requests are redirected. In the event of an error, you can // specify a different error code to return. type Redirect struct { @@ -21608,7 +27100,7 @@ type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the AWS Identity and Access Management // (IAM) role that Amazon S3 assumes when replicating objects. For more information, - // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) // in the Amazon Simple Storage Service Developer Guide. 
// // Role is a required field @@ -21673,14 +27165,30 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo type ReplicationRule struct { _ struct{} `type:"structure"` - // Specifies whether Amazon S3 should replicate delete makers. + // Specifies whether Amazon S3 replicates the delete markers. If you specify + // a Filter, you must specify this element. However, in the latest version of + // replication configuration (when Filter is specified), Amazon S3 doesn't replicate + // delete markers. Therefore, the DeleteMarkerReplication element can contain + // only Disabled. For an example configuration, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, Amazon + // S3 handled replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` - // A container for information about the replication destination. + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). // // Destination is a required field Destination *Destination `type:"structure" required:"true"` + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 Developer Guide. + ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + // A filter that identifies the subset of objects to which the replication rule // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. Filter *ReplicationRuleFilter `type:"structure"` @@ -21688,9 +27196,9 @@ type ReplicationRule struct { // A unique identifier for the rule. The maximum value is 255 characters. ID *string `type:"string"` - // An object keyname prefix that identifies the object or objects to which the - // rule applies. The maximum prefix length is 1,024 characters. To include all - // objects in a bucket, specify an empty string. + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -21700,21 +27208,21 @@ type ReplicationRule struct { // when filtering. If two or more rules identify the same object based on a // specified filter, the rule with higher priority takes precedence. 
For example:
 	//
-	//    * Same object quality prefix based filter criteria If prefixes you specified
+	//    * The same object qualifies under prefix-based filter criteria, and prefixes specified
 	//    in multiple rules overlap
 	//
-	//    * Same object qualify tag based filter criteria specified in multiple
+	//    * The same object qualifies under tag-based filter criteria specified in multiple
 	//    rules
 	//
-	// For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
-	// in the Amazon S3 Developer Guide.
+	// For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+	// in the Amazon Simple Storage Service Developer Guide.
 	Priority *int64 `type:"integer"`

 	// A container that describes additional filters for identifying the source
 	// objects that you want to replicate. You can choose to enable or disable the
 	// replication of these objects. Currently, Amazon S3 supports only the filter
 	// that you can specify for objects created with server-side encryption using
-	// an AWS KMS-Managed Key (SSE-KMS).
+	// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS).
 	SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`

 	// Specifies whether the rule is enabled.
@@ -21747,6 +27255,11 @@
 			invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
 		}
 	}
+	if s.ExistingObjectReplication != nil {
+		if err := s.ExistingObjectReplication.Validate(); err != nil {
+			invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams))
+		}
+	}
 	if s.Filter != nil {
 		if err := s.Filter.Validate(); err != nil {
 			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
@@ -21776,6 +27289,12 @@ func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
 	return s
 }

+// SetExistingObjectReplication sets the ExistingObjectReplication field's value.
+func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule {
+	s.ExistingObjectReplication = v
+	return s
+}
+
 // SetFilter sets the Filter field's value.
 func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule {
 	s.Filter = v
@@ -21812,11 +27331,25 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
 	return s
 }

+// A container for specifying rule filters. The filters determine the subset
+// of objects to which the rule applies. This element is required only if you
+// specify more than one filter.
+//
+// For example:
+//
+//    * If you specify both a Prefix and a Tag filter, wrap these filters in
+//    an And tag.
+//
+//    * If you specify a filter based on multiple tags, wrap the Tag elements
+//    in an And tag.
 type ReplicationRuleAndOperator struct {
 	_ struct{} `type:"structure"`

+	// An object key name prefix that identifies the subset of objects to which
+	// the rule applies.
 	Prefix *string `type:"string"`

+	// An array of tags containing key and value pairs.
 	Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
 }

@@ -21878,8 +27411,8 @@ type ReplicationRuleFilter struct {
 	// in an And tag.
 	And *ReplicationRuleAndOperator `type:"structure"`

-	// An object keyname prefix that identifies the subset of objects to which the
-	// rule applies.
+	// An object key name prefix that identifies the subset of objects to which
+	// the rule applies.
 	Prefix *string `type:"string"`

 	// A container for specifying a tag key and value.
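To make the And wrapping described above concrete, here is a minimal sketch (the prefix, tag key, and tag value are invented for illustration) built from the types in this file; because the rule combines a key name prefix with a tag, both filters sit inside the And member rather than in the top-level Prefix or Tag fields:

	filter := &s3.ReplicationRuleFilter{
		And: &s3.ReplicationRuleAndOperator{
			// Two filters are combined, so both are wrapped in And.
			Prefix: aws.String("logs/"),
			Tags: []*s3.Tag{
				{Key: aws.String("team"), Value: aws.String("ops")},
			},
		},
	}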
@@ -21936,6 +27469,91 @@ func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter {
 	return s
 }

+// A container specifying S3 Replication Time Control (S3 RTC) related information,
+// including whether S3 RTC is enabled and the time when all objects and operations
+// on objects must be replicated. Must be specified together with a Metrics
+// block.
+type ReplicationTime struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the replication time is enabled.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"`
+
+	// A container specifying the time by which replication should be complete for
+	// all objects and operations on objects.
+	//
+	// Time is a required field
+	Time *ReplicationTimeValue `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationTime) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationTime) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationTime) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.Time == nil {
+		invalidParams.Add(request.NewErrParamRequired("Time"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationTime) SetStatus(v string) *ReplicationTime {
+	s.Status = &v
+	return s
+}
+
+// SetTime sets the Time field's value.
+func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime {
+	s.Time = v
+	return s
+}
+
+// A container specifying the time value for S3 Replication Time Control (S3
+// RTC) and replication metrics EventThreshold.
+type ReplicationTimeValue struct {
+	_ struct{} `type:"structure"`
+
+	// Contains an integer specifying time in minutes.
+	//
+	// Valid values: 15 minutes.
+	Minutes *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ReplicationTimeValue) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationTimeValue) GoString() string {
+	return s.String()
+}
+
+// SetMinutes sets the Minutes field's value.
+func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue {
+	s.Minutes = &v
+	return s
+}
+
+// Container for Payer.
 type RequestPaymentConfiguration struct {
 	_ struct{} `type:"structure"`

@@ -21974,6 +27592,7 @@ func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfigur
 	return s
 }

+// Container for specifying if periodic QueryProgress messages should be sent.
 type RequestProgress struct {
 	_ struct{} `type:"structure"`

@@ -22001,21 +27620,34 @@ func (s *RequestProgress) SetEnabled(v bool) *RequestProgress {
 type RestoreObjectInput struct {
 	_ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"`

+	// The bucket name containing the object to restore.
+	//
+	// When using this API with an access point, you must direct requests to the
+	// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this operation using an access point through the AWS SDKs, you
+	// provide the access point ARN in place of the bucket name.
For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Object key for which the operation was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` // Container for restore job parameters. RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // VersionId used to reference a specific version of the object. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } @@ -22093,6 +27725,20 @@ func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { return s } +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -22298,6 +27944,7 @@ type Rule struct { // in the Amazon Simple Storage Service Developer Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + // Specifies the expiration for the lifecycle of the object. Expiration *LifecycleExpiration `type:"structure"` // Unique identifier for the rule. The value can't be longer than 255 characters. @@ -22409,12 +28056,12 @@ func (s *Rule) SetTransition(v *Transition) *Rule { return s } -// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { _ struct{} `locationName:"SSE-KMS" type:"structure"` - // Specifies the ID of the AWS Key Management Service (KMS) master encryption - // key to use for encrypting Inventory reports. + // Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric customer + // managed customer master key (CMK) to use for encrypting inventory reports. // // KeyId is a required field KeyId *string `type:"string" required:"true" sensitive:"true"` @@ -22449,7 +28096,7 @@ func (s *SSEKMS) SetKeyId(v string) *SSEKMS { return s } -// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. +// Specifies the use of SSE-S3 to encrypt delivered inventory reports. 
 type SSES3 struct {
 	_ struct{} `locationName:"SSE-S3" type:"structure"`
 }

@@ -22464,16 +28111,24 @@ func (s SSES3) GoString() string {
 	return s.String()
 }

+// Specifies the byte range of the object to get the records from. A record
+// is processed when its first byte is contained by the range. This parameter
+// is optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 about how to specify the start and end of the range.
 type ScanRange struct {
 	_ struct{} `type:"structure"`

 	// Specifies the end of the byte range. This parameter is optional. Valid values:
 	// non-negative integers. The default value is one less than the size of the
-	// object being queried.
+	// object being queried. If only the End parameter is supplied, it is interpreted
+	// to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange>
+	// means scan the last 50 bytes.
 	End *int64 `type:"long"`

 	// Specifies the start of the byte range. This parameter is optional. Valid
-	// values: non-negative integers. The default value is 0.
+	// values: non-negative integers. The default value is 0. If only start is supplied,
+	// it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange>
+	// means scan from byte 50 until the end of the file.
 	Start *int64 `type:"long"`
 }

@@ -22499,75 +28154,8 @@ func (s *ScanRange) SetStart(v int64) *ScanRange {
 	return s
 }

-// SelectObjectContentEventStream provides handling of EventStreams for
-// the SelectObjectContent API.
-//
-// Use this type to receive SelectObjectContentEventStream events. The events
-// can be read from the Events channel member.
-//
-// The events that can be received are:
-//
-//     * ContinuationEvent
-//     * EndEvent
-//     * ProgressEvent
-//     * RecordsEvent
-//     * StatsEvent
-type SelectObjectContentEventStream struct {
-	// Reader is the EventStream reader for the SelectObjectContentEventStream
-	// events. This value is automatically set by the SDK when the API call is made
-	// Use this member when unit testing your code with the SDK to mock out the
-	// EventStream Reader.
-	//
-	// Must not be nil.
-	Reader SelectObjectContentEventStreamReader
-
-	// StreamCloser is the io.Closer for the EventStream connection. For HTTP
-	// EventStream this is the response Body. The stream will be closed when
-	// the Close method of the EventStream is called.
-	StreamCloser io.Closer
-}
-
-// Close closes the EventStream. This will also cause the Events channel to be
-// closed. You can use the closing of the Events channel to terminate your
-// application's read from the API's EventStream.
-//
-// Will close the underlying EventStream reader. For EventStream over HTTP
-// connection this will also close the HTTP connection.
-//
-// Close must be called when done using the EventStream API. Not calling Close
-// may result in resource leaks.
-func (es *SelectObjectContentEventStream) Close() (err error) {
-	es.Reader.Close()
-	es.StreamCloser.Close()
-
-	return es.Err()
-}
-
-// Err returns any error that occurred while reading EventStream Events from
-// the service API's response. Returns nil if there were no errors.
-func (es *SelectObjectContentEventStream) Err() error {
-	if err := es.Reader.Err(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Events returns a channel to read EventStream Events from the
-// SelectObjectContent API.
-//
-// These events are:
-//
-//     * ContinuationEvent
-//     * EndEvent
-//     * ProgressEvent
-//     * RecordsEvent
-//     * StatsEvent
-func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
-	return es.Reader.Events()
-}
-
 // SelectObjectContentEventStreamEvent groups together all EventStream
-// events read from the SelectObjectContent API.
+// events written for SelectObjectContentEventStream.
 //
 // These events are:
 //
@@ -22578,11 +28166,12 @@
 //     * StatsEvent
 type SelectObjectContentEventStreamEvent interface {
 	eventSelectObjectContentEventStream()
+	eventstreamapi.Marshaler
+	eventstreamapi.Unmarshaler
 }

-// SelectObjectContentEventStreamReader provides the interface for reading EventStream
-// Events from the SelectObjectContent API. The
-// default implementation for this interface will be SelectObjectContentEventStream.
+// SelectObjectContentEventStreamReader provides the interface for reading events
+// from the stream. The default implementation for this interface will be
+// SelectObjectContentEventStreamData.
 //
 // The reader's Close method must allow multiple concurrent calls.
 //
@@ -22597,8 +28186,7 @@ type SelectObjectContentEventStreamReader interface {
 	// Returns a channel of events as they are read from the event stream.
 	Events() <-chan SelectObjectContentEventStreamEvent

-	// Close will close the underlying event stream reader. For event stream over
-	// HTTP this will also close the HTTP connection.
+	// Close will stop the reader reading events from the stream.
 	Close() error

 	// Returns any error that has occurred while reading from the event stream.
@@ -22608,57 +28196,44 @@
 type readSelectObjectContentEventStream struct {
 	eventReader *eventstreamapi.EventReader
 	stream      chan SelectObjectContentEventStreamEvent
-	errVal      atomic.Value
+	err         *eventstreamapi.OnceError

 	done      chan struct{}
 	closeOnce sync.Once
 }

-func newReadSelectObjectContentEventStream(
-	reader io.ReadCloser,
-	unmarshalers request.HandlerList,
-	logger aws.Logger,
-	logLevel aws.LogLevelType,
-) *readSelectObjectContentEventStream {
+func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
 	r := &readSelectObjectContentEventStream{
-		stream: make(chan SelectObjectContentEventStreamEvent),
-		done:   make(chan struct{}),
+		eventReader: eventReader,
+		stream:      make(chan SelectObjectContentEventStreamEvent),
+		done:        make(chan struct{}),
+		err:         eventstreamapi.NewOnceError(),
 	}
-
-	r.eventReader = eventstreamapi.NewEventReader(
-		reader,
-		protocol.HandlerPayloadUnmarshal{
-			Unmarshalers: unmarshalers,
-		},
-		r.unmarshalerForEventType,
-	)
-	r.eventReader.UseLogger(logger, logLevel)
+	go r.readEventStream()

 	return r
 }

-// Close will close the underlying event stream reader. For EventStream over
-// HTTP this will also close the HTTP connection.
+// Close will close the underlying event stream reader.
 func (r *readSelectObjectContentEventStream) Close() error {
 	r.closeOnce.Do(r.safeClose)
-
 	return r.Err()
 }

+func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
+	return r.err.ErrorSet()
+}
+
+func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
+	return r.done
+}
+
 func (r *readSelectObjectContentEventStream) safeClose() {
 	close(r.done)
-
-	err := r.eventReader.Close()
-	if err != nil {
-		r.errVal.Store(err)
-	}
 }

 func (r *readSelectObjectContentEventStream) Err() error {
-	if v := r.errVal.Load(); v != nil {
-		return v.(error)
-	}
-
-	return nil
+	return r.err.Err()
 }

 func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
@@ -22666,6 +28241,7 @@
 }

 func (r *readSelectObjectContentEventStream) readEventStream() {
+	defer r.Close()
 	defer close(r.stream)

 	for {
@@ -22680,7 +28256,7 @@
 				return
 			default:
 			}
-			r.errVal.Store(err)
+			r.err.SetError(err)
 			return
 		}

@@ -22692,22 +28268,20 @@
 	}
 }

-func (r *readSelectObjectContentEventStream) unmarshalerForEventType(
-	eventType string,
-) (eventstreamapi.Unmarshaler, error) {
+type unmarshalerForSelectObjectContentEventStreamEvent struct {
+	metadata protocol.ResponseMetadata
+}
+
+func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
 	switch eventType {
 	case "Cont":
 		return &ContinuationEvent{}, nil
-
 	case "End":
 		return &EndEvent{}, nil
-
 	case "Progress":
 		return &ProgressEvent{}, nil
-
 	case "Records":
 		return &RecordsEvent{}, nil
-
 	case "Stats":
 		return &StatsEvent{}, nil
 	default:
@@ -22739,7 +28313,7 @@ type SelectObjectContentInput struct {
 	// Expression is a required field
 	Expression *string `type:"string" required:"true"`

-	// The type of the provided expression (for example., SQL).
+	// The type of the provided expression (for example, SQL).
 	//
 	// ExpressionType is a required field
 	ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"`
@@ -22778,6 +28352,18 @@ type SelectObjectContentInput struct {
 	// is processed when its first byte is contained by the range. This parameter
 	// is optional, but when specified, it must not be empty. See RFC 2616, Section
 	// 14.35.1 about how to specify the start and end of the range.
+	//
+	// ScanRange may be used in the following ways:
+	//
+	//    * <scanrange><start>50</start><end>100</end></scanrange> - process only
+	//    the records starting between the bytes 50 and 100 (inclusive, counting
+	//    from zero)
+	//
+	//    * <scanrange><start>50</start></scanrange> - process only the records
+	//    starting after the byte 50
+	//
+	//    * <scanrange><end>50</end></scanrange> - process only the records within
+	//    the last 50 bytes of the file.
 	ScanRange *ScanRange `type:"structure"`
 }

@@ -22905,11 +28491,24 @@ func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectConte
 	return s
 }

+func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *SelectObjectContentInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
 type SelectObjectContentOutput struct {
 	_ struct{} `type:"structure" payload:"Payload"`

-	// Use EventStream to use the API's stream.
- EventStream *SelectObjectContentEventStream `type:"structure"` + EventStream *SelectObjectContentEventStream } // String returns the string representation @@ -22922,29 +28521,17 @@ func (s SelectObjectContentOutput) GoString() string { return s.String() } -// SetEventStream sets the EventStream field's value. func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { s.EventStream = v return s } +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} -func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { - if r.Error != nil { - return - } - reader := newReadSelectObjectContentEventStream( - r.HTTPResponse.Body, - r.Handlers.UnmarshalStream, - r.Config.Logger, - r.Config.LogLevel.Value(), - ) - go reader.readEventStream() - - eventStream := &SelectObjectContentEventStream{ - StreamCloser: r.HTTPResponse.Body, - Reader: reader, - } - s.EventStream = eventStream +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream } // Describes the parameters for Select job types. @@ -22956,7 +28543,7 @@ type SelectParameters struct { // Expression is a required field Expression *string `type:"string" required:"true"` - // The type of the provided expression (e.g., SQL). + // The type of the provided expression (for example, SQL). // // ExpressionType is a required field ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` @@ -23176,7 +28763,7 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter // that you can specify for objects created with server-side encryption using -// an AWS KMS-Managed Key (SSE-KMS). +// a customer master key (CMK) stored in AWS Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { _ struct{} `type:"structure"` @@ -23223,7 +28810,7 @@ type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` // Specifies whether Amazon S3 replicates objects created with server-side encryption - // using an AWS KMS-managed key. + // using a customer master key (CMK) stored in AWS Key Management Service. // // Status is a required field Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` @@ -23258,6 +28845,7 @@ func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { return s } +// Container for the stats details. type Stats struct { _ struct{} `type:"structure"` @@ -23299,6 +28887,7 @@ func (s *Stats) SetBytesScanned(v int64) *Stats { return s } +// Container for the Stats Event. 
 type StatsEvent struct {
 	_ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`

@@ -23339,6 +28928,16 @@ func (s *StatsEvent) UnmarshalEvent(
 	return nil
 }

+func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+	msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+	var buf bytes.Buffer
+	if err = pm.MarshalPayload(&buf, s); err != nil {
+		return eventstream.Message{}, err
+	}
+	msg.Payload = buf.Bytes()
+	return msg, err
+}
+
 // Specifies data related to access patterns to be collected and made available
 // to analyze the tradeoffs between different storage classes for an Amazon
 // S3 bucket.
@@ -23381,6 +28980,8 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport)
 	return s
 }

+// Container for data related to the storage class analysis for an Amazon S3
+// bucket for export.
 type StorageClassAnalysisDataExport struct {
 	_ struct{} `type:"structure"`

@@ -23438,6 +29039,7 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora
 	return s
 }

+// A container for a key-value name pair.
 type Tag struct {
 	_ struct{} `type:"structure"`

@@ -23493,9 +29095,12 @@ func (s *Tag) SetValue(v string) *Tag {
 	return s
 }

+// Container for TagSet elements.
 type Tagging struct {
 	_ struct{} `type:"structure"`

+	// A collection for a set of tags.
+	//
 	// TagSet is a required field
 	TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
 }

@@ -23539,9 +29144,11 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
 	return s
 }

+// Container for granting information.
 type TargetGrant struct {
 	_ struct{} `type:"structure"`

+	// Container for the person being granted permissions.
 	Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`

 	// Logging permissions assigned to the Grantee for the bucket.
@@ -23664,6 +29271,10 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
 	return s
 }

+// A container for specifying the configuration for publication of messages
+// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
+// detects specified events. This data type is deprecated. Use TopicConfiguration
+// instead.
 type TopicConfigurationDeprecated struct {
 	_ struct{} `type:"structure"`

@@ -23672,6 +29283,7 @@ type TopicConfigurationDeprecated struct {
 	// Deprecated: Event has been deprecated
 	Event *string `deprecated:"true" type:"string" enum:"Event"`

+	// A collection of events related to objects.
 	Events []*string `locationName:"Event" type:"list" flattened:"true"`

 	// An optional unique identifier for configurations in a notification configuration.
@@ -23764,6 +29376,8 @@ func (s *Transition) SetStorageClass(v string) *Transition {
 	return s
 }

 type UploadPartCopyInput struct {
 	_ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`

+	// The bucket name.
+	//
 	// Bucket is a required field
 	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

@@ -23789,11 +29403,12 @@ type UploadPartCopyInput struct {
 	// The range of bytes to copy from the source object. The range value must use
 	// the form bytes=first-last, where the first and last are the zero-based byte
 	// offsets to copy. For example, bytes=0-9 indicates that you want to copy the
-	// first ten bytes of the source. You can copy a range only if the source object
+	// first 10 bytes of the source. You can copy a range only if the source object
 	// is greater than 5 MB.
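 	//
 	// As an illustration (range invented), bytes=0-10485759 copies the first
 	// 10 MiB of the source object, following the bytes=first-last form described
 	// above.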
	CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
-	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+	// Specifies the algorithm to use when decrypting the source object (for example,
+	// AES256).
	CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
	// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
@@ -23802,10 +29417,12 @@ type UploadPartCopyInput struct {
	CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
	CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+	// Object key for which the multipart upload was initiated.
+	//
	// Key is a required field
	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
@@ -23815,26 +29432,28 @@ type UploadPartCopyInput struct {
	// PartNumber is a required field
	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header. This must be the same encryption key specified in the initiate multipart
	// upload request.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` // Upload ID identifying the multipart upload whose part is being copied. @@ -24007,9 +29626,24 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Container for all response elements. CopyPartResult *CopyPartResult `type:"structure"` // The version of the source object that was copied, if you have enabled versioning @@ -24026,16 +29660,17 @@ type UploadPartCopyOutput struct { SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity + // the response will include this header to provide round-trip message integrity // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` } @@ -24107,7 +29742,7 @@ type UploadPartInput struct { ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The base64-encoded 128-bit MD5 digest of the part data. This parameter is - // auto-populated when using the command from the CLI. This parameted is required + // auto-populated when using the command from the CLI. This parameter is required // if object lock parameters are specified. 
	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
@@ -24122,26 +29757,28 @@ type UploadPartInput struct {
	// PartNumber is a required field
	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
-	// Confirms that the requester knows that she or he will be charged for the
-	// request. Bucket owners need not specify this parameter in their requests.
-	// Documentation on downloading objects from requester pays buckets can be found
-	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. For information
+	// about downloading objects from requester pays buckets, see Downloading Objects
+	// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 Developer Guide.
	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
-	// Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+	// Specifies the algorithm to use when encrypting the object (for example,
+	// AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
	// data. This value is used to store the object and then it is discarded; Amazon
-	// does not store the encryption key. The key must be appropriate for use with
-	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+	// S3 does not store the encryption key. The key must be appropriate for use
+	// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header. This must be the same encryption key specified in the initiate multipart
	// upload request.
	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
-	// Amazon S3 uses this header for a message integrity check to ensure the encryption
-	// key was transmitted without error.
+	// Amazon S3 uses this header for a message integrity check to ensure that the
+	// encryption key was transmitted without error.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
	// Upload ID identifying the multipart upload whose part is being uploaded.
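The three SSE-C members documented above always travel together: the algorithm name, the raw key, and the key's MD5 digest. As a minimal sketch, not part of the diff, this is how a caller might wire them into an UploadPart request; the bucket, object key, and upload ID are placeholders, and the hard-coded key is for illustration only (the SDK base64-encodes the raw key into the header because the field is marshaled as a blob).

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Demo-only 256-bit key; a real key would come from a secure store.
	key := bytes.Repeat([]byte("k"), 32)
	keyMD5 := md5.Sum(key)

	_, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("example-bucket"),    // placeholder
		Key:        aws.String("example-object"),    // placeholder
		UploadId:   aws.String("example-upload-id"), // from CreateMultipartUpload
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
		// The same SSE-C key must be sent for every part and for the
		// initiate-multipart-upload call, per the doc comments above.
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(string(key)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(keyMD5[:])),
	})
	if err != nil {
		fmt.Println("UploadPart failed:", err)
	}
}
```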
@@ -24268,6 +29905,20 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
	return s
}
+func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) {
+	if s.Bucket == nil {
+		return nil, fmt.Errorf("member Bucket is nil")
+	}
+	return parseEndpointARN(*s.Bucket)
+}
+
+func (s *UploadPartInput) hasEndpointARN() bool {
+	if s.Bucket == nil {
+		return false
+	}
+	return arn.IsARN(*s.Bucket)
+}
+
type UploadPartOutput struct {
	_ struct{} `type:"structure"`
@@ -24284,16 +29935,16 @@ type UploadPartOutput struct {
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
	// If server-side encryption with a customer-provided encryption key was requested,
-	// the response will include this header to provide round trip message integrity
+	// the response will include this header to provide round-trip message integrity
	// verification of the customer-provided encryption key.
	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
-	// If present, specifies the ID of the AWS Key Management Service (KMS) master
-	// encryption key that was used for the object.
+	// If present, specifies the ID of the AWS Key Management Service (AWS KMS)
+	// symmetric customer managed customer master key (CMK) that was used for the
+	// object.
	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
-	// The Server-side encryption algorithm used when storing this object in S3
-	// (e.g., AES256, aws:kms).
+	// The server-side encryption algorithm used when storing this object in Amazon
+	// S3 (for example, AES256, aws:kms).
	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
}
@@ -24608,11 +30259,37 @@ const (
	// EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value
	EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+	// EventS3ObjectRestore is a Event enum value
+	EventS3ObjectRestore = "s3:ObjectRestore:*"
+
	// EventS3ObjectRestorePost is a Event enum value
	EventS3ObjectRestorePost = "s3:ObjectRestore:Post"
	// EventS3ObjectRestoreCompleted is a Event enum value
	EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed"
+
+	// EventS3Replication is a Event enum value
+	EventS3Replication = "s3:Replication:*"
+
+	// EventS3ReplicationOperationFailedReplication is a Event enum value
+	EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication"
+
+	// EventS3ReplicationOperationNotTracked is a Event enum value
+	EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked"
+
+	// EventS3ReplicationOperationMissedThreshold is a Event enum value
+	EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold"
+
+	// EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value
+	EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold"
+)
+
+const (
+	// ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value
+	ExistingObjectReplicationStatusEnabled = "Enabled"
+
+	// ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value
+	ExistingObjectReplicationStatusDisabled = "Disabled"
)
const (
@@ -24741,6 +30418,14 @@ const (
	MetadataDirectiveReplace = "REPLACE"
)
+const (
+	// MetricsStatusEnabled is a
MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -24889,6 +30574,14 @@ const ( ReplicationStatusReplica = "REPLICA" ) +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + // If present, indicates that the requester was successfully charged for the // request. const ( @@ -24896,10 +30589,11 @@ const ( RequestChargedRequester = "requester" ) -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. -// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. For information +// about downloading objects from requester pays buckets, see Downloading Objects +// in Requestor Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 Developer Guide. const ( // RequestPayerRequester is a RequestPayer enum value RequestPayerRequester = "requester" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 23d386b16..036d0b2e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -4,6 +4,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/s3err" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) func init() { @@ -13,7 +14,7 @@ func init() { func defaultInitClientFn(c *client.Client) { // Support building custom endpoints based on config - c.Handlers.Build.PushFront(updateEndpointForS3Config) + c.Handlers.Build.PushFront(endpointHandler) // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) @@ -27,7 +28,7 @@ func defaultInitClientFn(c *client.Client) { } func defaultInitRequestFn(r *request.Request) { - // Add reuest handlers for specific platforms. + // Add request handlers for specific platforms. // e.g. 
100-continue support for PUT requests using Go 1.6 platformRequestHandlers(r) @@ -73,3 +74,8 @@ type sseCustomerKeyGetter interface { type copySourceSSECustomerKeyGetter interface { getCopySourceSSECustomerKey() string } + +type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go new file mode 100644 index 000000000..c4048fbfb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -0,0 +1,233 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, accessPointResourceParser) +} + +func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "accesspoint": + return arn.ParseAccessPointResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = newInvalidARNError(nil, err) + return + } + + resReq := resourceRequest{ + Resource: resource, + Request: req, + } + + if resReq.IsCrossPartition() { + req.Error = newClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = newClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if resReq.HasCustomEndpoint() { + req.Error = newInvalidARNWithCustomEndpointError(resource, nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = newInvalidARNError(resource, nil) + } +} + +type resourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +func (r resourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +func (r resourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +func (r resourceRequest) UseFIPS() bool { + return isFIPS(aws.StringValue(r.Request.Config.Region)) +} + +func (r resourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +func (r resourceRequest) IsCrossRegion() bool { + return isCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +func (r resourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +func isFIPS(clientRegion string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} +func isCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} + 
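With endpointHandler in place, any operation whose Bucket member holds an access point ARN is routed through the logic above instead of the host-style bucket path. A caller-side sketch follows; it is not part of the diff, the ARN, region, and object key are placeholders, and the S3UseARNRegion config flag corresponds to the AllowCrossRegion check in resourceRequest.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
		// Permit the ARN's region to differ from the client region,
		// mirroring resourceRequest.AllowCrossRegion above.
		S3UseARNRegion: aws.Bool(true),
	}))
	svc := s3.New(sess)

	// Placeholder access point ARN; endpointHandler rewrites the request
	// host to <name>-<account>.s3-accesspoint.<region>.amazonaws.com and
	// strips the {Bucket} segment from the path.
	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("arn:aws:s3:us-east-1:123456789012:accesspoint/my-ap"),
		Key:    aws.String("example-object"),
	})
	if err != nil {
		fmt.Println("GetObject failed:", err)
		return
	}
	defer out.Body.Close()
}
```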
+func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return newClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +type accessPointEndpointBuilder arn.AccessPointARN + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." +) + +func (a accessPointEndpointBuilder) Build(req *request.Request) error { + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if isFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. 
+ return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion) + if err != nil { + return newFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dualstack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accesPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + req.ClientInfo.SigningName = endpoint.SigningName + req.ClientInfo.SigningRegion = endpoint.SigningRegion + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return newInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go new file mode 100644 index 000000000..9df03e78d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go @@ -0,0 +1,151 @@ +package s3 + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3/internal/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +type invalidARNError struct { + message string + resource arn.Resource + origErr error +} + +func (e invalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +func (e invalidARNError) Code() string { + return invalidARNErrorErrCode +} + +func (e invalidARNError) Message() string { + return e.message +} + +func (e invalidARNError) OrigErr() error { + return e.origErr +} + +func newInvalidARNError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { + return invalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + 
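Both invalidARNError and configurationError satisfy the awserr.Error interface, so the two codes declared at the top of this file are what callers actually observe. A short, hypothetical sketch of matching them at a call site (the client setup and ARN are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})))

	// Cross-region ARN without S3UseARNRegion set; per endpointHandler this
	// yields a configuration error before the request is ever sent.
	_, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("arn:aws:s3:us-east-1:123456789012:accesspoint/my-ap"),
		Key:    aws.String("example-object"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "InvalidARNError": // invalidARNErrorErrCode
			// The ARN could not be parsed or names an unsupported resource.
			fmt.Println("bad ARN:", aerr.Message())
		case "ConfigurationError": // configurationErrorErrCode
			// Client region, partition, or endpoint settings conflict with the ARN.
			fmt.Println("client/ARN mismatch:", aerr.Message())
		}
	}
}
```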
+// ARN not supported for the target partition
+func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
+	return invalidARNError{
+		message:  "resource ARN not supported for the target ARN partition",
+		origErr:  err,
+		resource: resource,
+	}
+}
+
+type configurationError struct {
+	message           string
+	resource          arn.Resource
+	clientPartitionID string
+	clientRegion      string
+	origErr           error
+}
+
+func (e configurationError) Error() string {
+	extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
+		e.resource, e.clientPartitionID, e.clientRegion)
+
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
+}
+
+func (e configurationError) Code() string {
+	return configurationErrorErrCode
+}
+
+func (e configurationError) Message() string {
+	return e.message
+}
+
+func (e configurationError) OrigErr() error {
+	return e.origErr
+}
+
+func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client partition does not match provided ARN partition",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client region does not match provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "endpoint resolver failed to find an endpoint for the provided ARN region",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client configured for FIPS but cross-region resource ARN provided",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client configured for S3 Accelerate but is not supported with resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
+
+func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
+	return configurationError{
+		message:           "client configured for FIPS with cross-region enabled but is not supported with cross-region resource ARN",
+		origErr:           err,
+		resource:          resource,
+		clientPartitionID: clientPartitionID,
+		clientRegion:      clientRegion,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
index 931cb17bb..49aeff16f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -13,6 +13,12 @@ const (
	// ErrCodeBucketAlreadyOwnedByYou for service response error code
	// "BucketAlreadyOwnedByYou".
+ // + // The bucket you tried to create already exists, and you own it. Amazon S3 + // returns this error in all AWS Regions except in the North Virginia Region. + // For legacy compatibility, if you re-create an existing bucket that you already + // own in the North Virginia Region, Amazon S3 returns 200 OK and resets the + // bucket access control lists (ACLs). ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" // ErrCodeNoSuchBucket for service response error code @@ -36,13 +42,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier + // This operation is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon Glacier. + // only stored in Amazon S3 Glacier. ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index a7fbc2de2..81cdec1ae 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -30,10 +30,10 @@ var accelerateOpBlacklist = operationBlacklist{ opListBuckets, opCreateBucket, opDeleteBucket, } -// Request handler to automatically add the bucket name to the endpoint domain +// Automatically add the bucket name to the endpoint domain // if possible. This style of bucket is valid for all bucket names which are // DNS compatible and do not contain "." -func updateEndpointForS3Config(r *request.Request) { +func updateEndpointForS3Config(r *request.Request, bucketName string) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) @@ -43,45 +43,29 @@ func updateEndpointForS3Config(r *request.Request) { r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") } } - updateEndpointForAccelerate(r) + updateEndpointForAccelerate(r, bucketName) } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { - updateEndpointForHostStyle(r) + updateEndpointForHostStyle(r, bucketName) } } -func updateEndpointForHostStyle(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. - return - } - - if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { +func updateEndpointForHostStyle(r *request.Request, bucketName string) { + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) { // bucket name must be valid to put into the host return } - moveBucketToHost(r.HTTPRequest.URL, bucket) + moveBucketToHost(r.HTTPRequest.URL, bucketName) } var ( accelElem = []byte("s3-accelerate.dualstack.") ) -func updateEndpointForAccelerate(r *request.Request) { - bucket, ok := bucketNameFromReqParams(r.Params) - if !ok { - // Ignore operation requests if the bucketname was not provided - // if this is an input validation error the validation handler - // will report it. 
-		return
-	}
-
-	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
		r.Error = awserr.New("InvalidParameterException",
-			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
			nil)
		return
	}
@@ -106,7 +90,7 @@ func updateEndpointForAccelerate(r *request.Request) {
	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
-	moveBucketToHost(r.HTTPRequest.URL, bucket)
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
}
// Attempts to retrieve the bucket name from the request input parameters.
@@ -148,8 +132,5 @@ func dnsCompatibleBucketName(bucket string) bool {
// moveBucketToHost moves the bucket name from the URI path to URL host.
func moveBucketToHost(u *url.URL, bucket string) {
	u.Host = bucket + "." + u.Host
-	u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
-	if u.Path == "" {
-		u.Path = "/"
-	}
+	removeBucketFromPath(u)
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
new file mode 100644
index 000000000..2f93f96fd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go
@@ -0,0 +1,45 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if len(a.Region) == 0 {
+		return AccessPointARN{}, InvalidARNError{a, "region not set"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
new file mode 100644
index 000000000..a942d887f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go
@@ -0,0 +1,71 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{a, "partition not set"} + } + if a.Service != "s3" { + return nil, InvalidARNError{a, "service is not S3"} + } + if len(a.Resource) == 0 { + return nil, InvalidARNError{a, "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +func (e InvalidARNError) Error() string { + return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 07e129737..b4c07b4d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "s3" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "S3" // ServiceID is a unique identifer of a specific service. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the S3 client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a S3 client from just a session. // svc := s3.New(mySession) // @@ -76,6 +78,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler) svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) // Run custom client initialization if present diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 9c5ed4545..7f60d4aa1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -78,6 +78,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRole last // for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -91,6 +93,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. 
// +// Permissions +// // The temporary security credentials created by AssumeRole can be used to make // API calls to any AWS service with the following exception: You cannot call // the AWS STS GetFederationToken or GetSessionToken API operations. @@ -99,7 +103,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -131,6 +135,24 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// // Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information @@ -165,9 +187,18 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. 
For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -256,6 +287,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // an access key ID, a secret access key, and a security token. Applications // can use these temporary security credentials to sign calls to AWS services. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. Your role session lasts for the @@ -271,6 +304,8 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any AWS service with the following exception: you cannot // call the STS GetFederationToken or GetSessionToken API operations. @@ -279,7 +314,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -289,12 +324,6 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Before your application can call AssumeRoleWithSAML, you must configure your -// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, -// you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider. You must -// also create an IAM role that specifies this SAML provider in its trust policy. -// // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document // that is uploaded for the SAML provider entity for your identity provider. @@ -302,8 +331,50 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail // logs. The entry includes the value in the NameID element of the SAML assertion. // We recommend that you use a NameIDType that is not associated with any personally -// identifiable information (PII). For example, you could instead use the Persistent -// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// identifiable information (PII). 
For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // For more information, see the following resources: // @@ -332,9 +403,18 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. 
For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -456,6 +536,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // key ID, a secret access key, and a security token. Applications can use these // temporary security credentials to sign calls to AWS service API operations. // +// Session Duration +// // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter // to specify the duration of your session. You can provide a value from 900 @@ -469,6 +551,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Permissions +// // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any AWS service with the following exception: // you cannot call the STS GetFederationToken or GetSessionToken API operations. @@ -477,7 +561,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. Passing policies +// and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and // the session policies. You can use the role's temporary credentials in subsequent @@ -487,6 +571,42 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // +// Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plain text session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An AWS conversion compresses the passed session policies and session tags +// into a packed binary format that has a separate limit. 
Your request can fail +// for this limit even if your plain text meets the other requirements. The +// PackedPolicySize response element indicates by percentage how close the policies +// and tags for your request are to the upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// Identities +// // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that // the application can assume. The role that your application assumes must trust @@ -514,8 +634,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and // AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). // These toolkits contain sample apps that show how to invoke the identity -// providers, and then how to use the information from these providers to -// get and use temporary security credentials. +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // // * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of @@ -535,9 +655,18 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
// // * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" // The identity provider (IdP) reported that authentication failed. This might @@ -547,11 +676,11 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // can also mean that the claim has expired or has been explicitly revoked. // // * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the non-AWS identity provider -// (IDP) that was asked to verify the incoming identity token could not be reached. -// This is often a transient error caused by network conditions. Retry the request +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. This +// is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the -// error persists, the non-AWS identity provider might be down or not responding. +// error persists, the identity provider might be down or not responding. // // * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" // The web identity token that was passed could not be validated by AWS. Get @@ -676,9 +805,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // Returned Error Codes: // * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// This error is returned if the message passed to DecodeAuthorizationMessage -// was invalid. This can happen if the token contains invalid characters, such -// as linebreaks. +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -763,7 +892,8 @@ func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *reques // pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html) // to learn which IAM user owns the keys. To learn who requested the temporary // credentials for an ASIA access key, view the STS events in your CloudTrail -// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html). +// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. // // This operation does not indicate the state of the access key. The key might // be active, inactive, or deleted. Active keys might not have permissions to @@ -850,7 +980,8 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // sts:GetCallerIdentity action, you can still perform this operation. Permissions // are not required because the same information is returned when an IAM user // or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa). +// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -942,7 +1073,8 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // or an OpenID Connect-compatible identity provider. In this case, we recommend // that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. // // You can also call GetFederationToken using the security credentials of an // AWS account root user, but we do not recommend it. Instead, we recommend @@ -952,41 +1084,67 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // +// Session duration +// // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// is 43,200 seconds (12 hours). Temporary credentials that are obtained by -// using AWS account root user credentials have a maximum duration of 3,600 -// seconds (1 hour). -// -// The temporary security credentials created by GetFederationToken can be used -// to make API calls to any AWS service with the following exceptions: -// -// * You cannot use these credentials to call any IAM API operations. -// -// * You cannot call any STS API operations except GetCallerIdentity. +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). // // Permissions // +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline -// and managed session policies shouldn't exceed 2048 characters. +// and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. The only exception -// is when the credentials are used to access a resource that has a resource-based -// policy that specifically references the federated user session in the Principal -// element of the policy. When you pass session policies, the session permissions -// are the intersection of the IAM user policies and the session policies that -// you pass. This gives you a way to further restrict the permissions for a -// federated user. You cannot use session policies to grant more permissions -// than those that are defined in the permissions policy of the IAM user. 
For -// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. For information about using GetFederationToken to // create temporary security credentials, see GetFederationToken—Federation // Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1000,9 +1158,18 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // message describes the specific error. // // * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the policy document was too large. The error -// message describes how big the policy document is, in packed form, as a percentage -// of what the API allows. +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An AWS conversion compresses the +// session policy document, session policy ARNs, and session tags into a packed +// binary format that has a separate limit. The error message indicates by percentage +// how close the policies and tags are to the upper size limit. 
For more information, +// see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // * ErrCodeRegionDisabledException "RegionDisabledException" // STS is not activated in the requested region for the account that is being @@ -1091,6 +1258,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // +// Session Duration +// // The GetSessionToken operation must be called by using the long-term AWS security // credentials of the AWS account root user or an IAM user. Credentials that // are created by IAM users are valid for the duration that you specify. This @@ -1099,6 +1268,8 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // based on account credentials can range from 900 seconds (15 minutes) up to // 3,600 seconds (1 hour), with a default of 1 hour. // +// Permissions +// // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // @@ -1213,16 +1384,16 @@ type AssumeRoleInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1231,15 +1402,15 @@ type AssumeRoleInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. 
// - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1284,6 +1455,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters, and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the AWS CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). 
If the role being assumed requires MFA and if the TokenCode value @@ -1292,6 +1498,19 @@ type AssumeRoleInput struct { // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` } // String returns the string representation @@ -1344,6 +1563,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1393,12 +1622,24 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + // SetTokenCode sets the TokenCode field's value. func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { s.TokenCode = &v return s } +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. type AssumeRoleOutput struct { @@ -1418,9 +1659,10 @@ type AssumeRoleOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -1491,16 +1733,16 @@ type AssumeRoleWithSAMLInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. 
However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1509,15 +1751,15 @@ type AssumeRoleWithSAMLInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1673,9 +1915,10 @@ type AssumeRoleWithSAMLOutput struct { // ) ) NameQualifier *string `type:"string"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The value of the NameID element in the Subject element of the SAML assertion. @@ -1786,16 +2029,16 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. 
// - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1804,15 +2047,15 @@ type AssumeRoleWithWebIdentityInput struct { // // This parameter is optional. You can provide up to 10 managed policy ARNs. // However, the plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. For more information about ARNs, + // policies can't exceed 2,048 characters. For more information about ARNs, // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1983,9 +2226,10 @@ type AssumeRoleWithWebIdentityOutput struct { // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` - // A percentage value that indicates the size of the policy in packed form. - // The service rejects any policy with a packed size greater than 100 percent, - // which means the policy exceeded the allowed space. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect @@ -2057,7 +2301,7 @@ type AssumedRoleUser struct { // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. 
// // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2225,7 +2469,7 @@ type FederatedUser struct { // The ARN that specifies the federated user that is associated with the credentials. // For more information about ARNs and how to use them in policies, see IAM // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) - // in Using IAM. + // in the IAM User Guide. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -2265,7 +2509,7 @@ type GetAccessKeyInfoInput struct { // The identifier of an access key. // // This parameter allows (through its regex pattern) a string of characters - // that can consist of any upper- or lowercased letter or digit. + // that can consist of any upper- or lowercase letter or digit. // // AccessKeyId is a required field AccessKeyId *string `min:"16" type:"string" required:"true"` @@ -2418,10 +2662,7 @@ type GetFederationTokenInput struct { // use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives @@ -2431,17 +2672,23 @@ type GetFederationTokenInput struct { // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // // The plain text that you use for both inline and managed session policies - // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // can't exceed 2,048 characters. The JSON policy characters can be any ASCII // character from the space character to the end of the valid character list // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. 
Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2452,16 +2699,13 @@ type GetFederationTokenInput struct { // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline - // and managed session policies shouldn't exceed 2048 characters. You can provide + // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // This parameter is optional. However, if you do not pass any session policies, - // then the resulting federated user session has no permissions. The only exception - // is when the credentials are used to access a resource that has a resource-based - // policy that specifically references the federated user session in the Principal - // element of the policy. + // then the resulting federated user session has no permissions. // // When you pass session policies, the session permissions are the intersection // of the IAM user policies and the session policies that you pass. This gives @@ -2471,12 +2715,46 @@ type GetFederationTokenInput struct { // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The characters in this parameter count towards the 2048 character session - // policy guideline. However, an AWS conversion compresses the session policies - // into a packed binary format that has a separate limit. This is the enforced - // limit. The PackedPolicySize response element indicates by percentage how - // close the policy is to the upper size limit. + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plain + // text session tag keys can’t exceed 128 characters and the values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2514,6 +2792,16 @@ func (s *GetFederationTokenInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2545,6 +2833,12 @@ func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetF return s } +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. type GetFederationTokenOutput struct { @@ -2563,9 +2857,10 @@ type GetFederationTokenOutput struct { // an Amazon S3 bucket policy. FederatedUser *FederatedUser `type:"structure"` - // A percentage value indicating the size of the policy in packed form. The - // service rejects policies for which the packed size is greater than 100 percent - // of the allowed value. + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` } @@ -2748,3 +3043,73 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { s.Arn = &v return s } + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging AWS STS +// Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. 
+ // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index a3e378eda..a233f542e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -14,11 +14,11 @@ const ( // ErrCodeIDPCommunicationErrorException for service response error code // "IDPCommunicationError". // - // The request could not be fulfilled because the non-AWS identity provider - // (IDP) that was asked to verify the incoming identity token could not be reached. - // This is often a transient error caused by network conditions. Retry the request + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request // a limited number of times so that you don't exceed the request rate. If the - // error persists, the non-AWS identity provider might be down or not responding. + // error persists, the identity provider might be down or not responding. ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" // ErrCodeIDPRejectedClaimException for service response error code @@ -34,9 +34,9 @@ const ( // ErrCodeInvalidAuthorizationMessageException for service response error code // "InvalidAuthorizationMessageException". // - // This error is returned if the message passed to DecodeAuthorizationMessage - // was invalid. This can happen if the token contains invalid characters, such - // as linebreaks. + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" // ErrCodeInvalidIdentityTokenException for service response error code @@ -56,9 +56,18 @@ const ( // ErrCodePackedPolicyTooLargeException for service response error code // "PackedPolicyTooLarge". // - // The request was rejected because the policy document was too large. 
The error - // message describes how big the policy document is, in packed form, as a percentage - // of what the API allows. + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An AWS conversion compresses the + // session policy document, session policy ARNs, and session tags into a packed + // binary format that has a separate limit. The error message indicates by percentage + // how close the policies and tags are to the upper size limit. For more information, + // see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" // ErrCodeRegionDisabledException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 2c3c3d2c1..d34a68553 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -31,7 +31,7 @@ var initRequest func(*request.Request) const ( ServiceName = "sts" // Name of service. EndpointsID = ServiceName // ID to lookup a service endpoint with. - ServiceID = "STS" // ServiceID is a unique identifer of a specific service. + ServiceID = "STS" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the STS client with a session. @@ -39,6 +39,8 @@ const ( // aws.Config parameter to add your extra config. // // Example: +// mySession := session.Must(session.NewSession()) +// // // Create a STS client from just a session. // svc := sts.New(mySession) // diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go deleted file mode 100644 index af62279a6..000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/md2man.go +++ /dev/null @@ -1,20 +0,0 @@ -package md2man - -import ( - "github.com/russross/blackfriday" -) - -// Render converts a markdown document into a roff formatted document. 
-func Render(doc []byte) []byte { - renderer := RoffRenderer(0) - extensions := 0 - extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS - extensions |= blackfriday.EXTENSION_TABLES - extensions |= blackfriday.EXTENSION_FENCED_CODE - extensions |= blackfriday.EXTENSION_AUTOLINK - extensions |= blackfriday.EXTENSION_SPACE_HEADERS - extensions |= blackfriday.EXTENSION_FOOTNOTES - extensions |= blackfriday.EXTENSION_TITLEBLOCK - - return blackfriday.Markdown(doc, renderer, extensions) -} diff --git a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go deleted file mode 100644 index 8c29ec687..000000000 --- a/vendor/github.com/cpuguy83/go-md2man/md2man/roff.go +++ /dev/null @@ -1,285 +0,0 @@ -package md2man - -import ( - "bytes" - "fmt" - "html" - "strings" - - "github.com/russross/blackfriday" -) - -type roffRenderer struct { - ListCounters []int -} - -// RoffRenderer creates a new blackfriday Renderer for generating roff documents -// from markdown -func RoffRenderer(flags int) blackfriday.Renderer { - return &roffRenderer{} -} - -func (r *roffRenderer) GetFlags() int { - return 0 -} - -func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) { - out.WriteString(".TH ") - - splitText := bytes.Split(text, []byte("\n")) - for i, line := range splitText { - line = bytes.TrimPrefix(line, []byte("% ")) - if i == 0 { - line = bytes.Replace(line, []byte("("), []byte("\" \""), 1) - line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1) - } - line = append([]byte("\""), line...) - line = append(line, []byte("\" ")...) - out.Write(line) - } - out.WriteString("\n") - - // disable hyphenation - out.WriteString(".nh\n") - // disable justification (adjust text to left margin only) - out.WriteString(".ad l\n") -} - -func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) { - out.WriteString("\n.PP\n.RS\n\n.nf\n") - escapeSpecialChars(out, text) - out.WriteString("\n.fi\n.RE\n") -} - -func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) { - out.WriteString("\n.PP\n.RS\n") - out.Write(text) - out.WriteString("\n.RE\n") -} - -func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) { // nolint: golint - out.Write(text) -} - -func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { - marker := out.Len() - - switch { - case marker == 0: - // This is the doc header - out.WriteString(".TH ") - case level == 1: - out.WriteString("\n\n.SH ") - case level == 2: - out.WriteString("\n.SH ") - default: - out.WriteString("\n.SS ") - } - - if !text() { - out.Truncate(marker) - return - } -} - -func (r *roffRenderer) HRule(out *bytes.Buffer) { - out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n") -} - -func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) { - marker := out.Len() - r.ListCounters = append(r.ListCounters, 1) - out.WriteString("\n.RS\n") - if !text() { - out.Truncate(marker) - return - } - r.ListCounters = r.ListCounters[:len(r.ListCounters)-1] - out.WriteString("\n.RE\n") -} - -func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) { - if flags&blackfriday.LIST_TYPE_ORDERED != 0 { - out.WriteString(fmt.Sprintf(".IP \"%3d.\" 5\n", r.ListCounters[len(r.ListCounters)-1])) - r.ListCounters[len(r.ListCounters)-1]++ - } else { - out.WriteString(".IP \\(bu 2\n") - } - out.Write(text) - out.WriteString("\n") -} - -func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) { - marker := out.Len() - out.WriteString("\n.PP\n") - if 
!text() { - out.Truncate(marker) - return - } - if marker != 0 { - out.WriteString("\n") - } -} - -func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) { - out.WriteString("\n.TS\nallbox;\n") - - maxDelims := 0 - lines := strings.Split(strings.TrimRight(string(header), "\n")+"\n"+strings.TrimRight(string(body), "\n"), "\n") - for _, w := range lines { - curDelims := strings.Count(w, "\t") - if curDelims > maxDelims { - maxDelims = curDelims - } - } - out.Write([]byte(strings.Repeat("l ", maxDelims+1) + "\n")) - out.Write([]byte(strings.Repeat("l ", maxDelims+1) + ".\n")) - out.Write(header) - if len(header) > 0 { - out.Write([]byte("\n")) - } - - out.Write(body) - out.WriteString("\n.TE\n") -} - -func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) { - if out.Len() > 0 { - out.WriteString("\n") - } - out.Write(text) -} - -func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString("\t") - } - if len(text) == 0 { - text = []byte{' '} - } - out.Write([]byte("\\fB\\fC" + string(text) + "\\fR")) -} - -func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { - if out.Len() > 0 { - out.WriteString("\t") - } - if len(text) > 30 { - text = append([]byte("T{\n"), text...) - text = append(text, []byte("\nT}")...) - } - if len(text) == 0 { - text = []byte{' '} - } - out.Write(text) -} - -func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) { - -} - -func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) { - -} - -func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { - out.WriteString("\n\\[la]") - out.Write(link) - out.WriteString("\\[ra]") -} - -func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB\\fC") - escapeSpecialChars(out, text) - out.WriteString("\\fR") -} - -func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fB") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\fI") - out.Write(text) - out.WriteString("\\fP") -} - -func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) { -} - -func (r *roffRenderer) LineBreak(out *bytes.Buffer) { - out.WriteString("\n.br\n") -} - -func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) { - out.Write(content) - r.AutoLink(out, link, 0) -} - -func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) { // nolint: golint - out.Write(tag) -} - -func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { - out.WriteString("\\s+2") - out.Write(text) - out.WriteString("\\s-2") -} - -func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { -} - -func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { - -} - -func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) { - out.WriteString(html.UnescapeString(string(entity))) -} - -func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) { - escapeSpecialChars(out, text) -} - -func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) { -} - -func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) { -} - -func needsBackslash(c byte) bool { - for _, r := range []byte("-_&\\~") { - if c == r { - return true - } - } - return false -} - -func escapeSpecialChars(out *bytes.Buffer, text []byte) { - for i := 0; i < len(text); i++ { - 
// escape initial apostrophe or period - if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { - out.WriteString("\\&") - } - - // directly copy normal characters - org := i - - for i < len(text) && !needsBackslash(text[i]) { - i++ - } - if i > org { - out.Write(text[org:i]) - } - - // escape a character - if i >= len(text) { - break - } - out.WriteByte('\\') - out.WriteByte(text[i]) - } -} diff --git a/vendor/github.com/cpuguy83/go-md2man/LICENSE.md b/vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md similarity index 100% rename from vendor/github.com/cpuguy83/go-md2man/LICENSE.md rename to vendor/github.com/cpuguy83/go-md2man/v2/LICENSE.md diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go new file mode 100644 index 000000000..b48005673 --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -0,0 +1,14 @@ +package md2man + +import ( + "github.com/russross/blackfriday/v2" +) + +// Render converts a markdown document into a roff formatted document. +func Render(doc []byte) []byte { + renderer := NewRoffRenderer() + + return blackfriday.Run(doc, + []blackfriday.Option{blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions())}...) +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go new file mode 100644 index 000000000..0668a66cf --- /dev/null +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -0,0 +1,345 @@ +package md2man + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/russross/blackfriday/v2" +) + +// roffRenderer implements the blackfriday.Renderer interface for creating +// roff format (manpages) from markdown text +type roffRenderer struct { + extensions blackfriday.Extensions + listCounters []int + firstHeader bool + defineTerm bool + listDepth int +} + +const ( + titleHeader = ".TH " + topLevelHeader = "\n\n.SH " + secondLevelHdr = "\n.SH " + otherHeader = "\n.SS " + crTag = "\n" + emphTag = "\\fI" + emphCloseTag = "\\fP" + strongTag = "\\fB" + strongCloseTag = "\\fP" + breakTag = "\n.br\n" + paraTag = "\n.PP\n" + hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" + linkTag = "\n\\[la]" + linkCloseTag = "\\[ra]" + codespanTag = "\\fB\\fC" + codespanCloseTag = "\\fR" + codeTag = "\n.PP\n.RS\n\n.nf\n" + codeCloseTag = "\n.fi\n.RE\n" + quoteTag = "\n.PP\n.RS\n" + quoteCloseTag = "\n.RE\n" + listTag = "\n.RS\n" + listCloseTag = "\n.RE\n" + arglistTag = "\n.TP\n" + tableStart = "\n.TS\nallbox;\n" + tableEnd = ".TE\n" + tableCellStart = "T{\n" + tableCellEnd = "\nT}\n" +) + +// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents +// from markdown +func NewRoffRenderer() *roffRenderer { // nolint: golint + var extensions blackfriday.Extensions + + extensions |= blackfriday.NoIntraEmphasis + extensions |= blackfriday.Tables + extensions |= blackfriday.FencedCode + extensions |= blackfriday.SpaceHeadings + extensions |= blackfriday.Footnotes + extensions |= blackfriday.Titleblock + extensions |= blackfriday.DefinitionLists + return &roffRenderer{ + extensions: extensions, + } +} + +// GetExtensions returns the list of extensions used by this renderer implementation +func (r *roffRenderer) GetExtensions() blackfriday.Extensions { + return r.extensions +} + +// RenderHeader handles outputting the header at document start +func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) { + // disable hyphenation + out(w, ".nh\n") +} + 
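Compared with the deleted v1 renderer above, the v2 rewrite no longer implements one callback per markdown construct: blackfriday/v2 parses to an AST and calls RenderNode on entry and exit of each node. The exported entry point is unchanged, so callers keep working; a minimal usage sketch, with an invented markdown fragment:

    package main

    import (
    	"fmt"

    	"github.com/cpuguy83/go-md2man/v2/md2man"
    )

    func main() {
    	// Invented input; the first level-1 heading becomes the man page's
    	// .TH title line via the renderer's firstHeader logic.
    	src := []byte("# EXAMPLE 1\n\nSome *emphasised* text.\n\n## SECTION\n\nBody.\n")
    	fmt.Print(string(md2man.Render(src)))
    }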
+// RenderFooter handles outputting the footer at the document end; the roff +// renderer has no footer information +func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { +} + +// RenderNode is called for each node in a markdown document; based on the node +// type the equivalent roff output is sent to the writer +func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + + var walkAction = blackfriday.GoToNext + + switch node.Type { + case blackfriday.Text: + r.handleText(w, node, entering) + case blackfriday.Softbreak: + out(w, crTag) + case blackfriday.Hardbreak: + out(w, breakTag) + case blackfriday.Emph: + if entering { + out(w, emphTag) + } else { + out(w, emphCloseTag) + } + case blackfriday.Strong: + if entering { + out(w, strongTag) + } else { + out(w, strongCloseTag) + } + case blackfriday.Link: + if !entering { + out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + } + case blackfriday.Image: + // ignore images + walkAction = blackfriday.SkipChildren + case blackfriday.Code: + out(w, codespanTag) + escapeSpecialChars(w, node.Literal) + out(w, codespanCloseTag) + case blackfriday.Document: + break + case blackfriday.Paragraph: + // roff .PP markers break lists + if r.listDepth > 0 { + return blackfriday.GoToNext + } + if entering { + out(w, paraTag) + } else { + out(w, crTag) + } + case blackfriday.BlockQuote: + if entering { + out(w, quoteTag) + } else { + out(w, quoteCloseTag) + } + case blackfriday.Heading: + r.handleHeading(w, node, entering) + case blackfriday.HorizontalRule: + out(w, hruleTag) + case blackfriday.List: + r.handleList(w, node, entering) + case blackfriday.Item: + r.handleItem(w, node, entering) + case blackfriday.CodeBlock: + out(w, codeTag) + escapeSpecialChars(w, node.Literal) + out(w, codeCloseTag) + case blackfriday.Table: + r.handleTable(w, node, entering) + case blackfriday.TableCell: + r.handleTableCell(w, node, entering) + case blackfriday.TableHead: + case blackfriday.TableBody: + case blackfriday.TableRow: + // no action as cell entries do all the nroff formatting + return blackfriday.GoToNext + default: + fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String()) + } + return walkAction +} + +func (r *roffRenderer) handleText(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + // handle special roff table cell text encapsulation + if node.Parent.Type == blackfriday.TableCell { + if len(node.Literal) > 30 { + start = tableCellStart + end = tableCellEnd + } else { + // end rows that aren't terminated by "tableCellEnd" with a cr if end of row + if node.Parent.Next == nil && !node.Parent.IsHeader { + end = crTag + } + } + } + out(w, start) + escapeSpecialChars(w, node.Literal) + out(w, end) +} + +func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + switch node.Level { + case 1: + if !r.firstHeader { + out(w, titleHeader) + r.firstHeader = true + break + } + out(w, topLevelHeader) + case 2: + out(w, secondLevelHdr) + default: + out(w, otherHeader) + } + } +} + +func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) { + openTag := listTag + closeTag := listCloseTag + if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // tags for definition lists handled within Item node + openTag = "" + closeTag = "" + } + if entering { + r.listDepth++ + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = 
append(r.listCounters, 1) + } + out(w, openTag) + } else { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + r.listCounters = r.listCounters[:len(r.listCounters)-1] + } + out(w, closeTag) + r.listDepth-- + } +} + +func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + if node.ListFlags&blackfriday.ListTypeOrdered != 0 { + out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1])) + r.listCounters[len(r.listCounters)-1]++ + } else if node.ListFlags&blackfriday.ListTypeDefinition != 0 { + // state machine for handling terms and following definitions + // since blackfriday does not distinguish them properly, nor + // does it separate them into separate lists as it should + if !r.defineTerm { + out(w, arglistTag) + r.defineTerm = true + } else { + r.defineTerm = false + } + } else { + out(w, ".IP \\(bu 2\n") + } + } else { + out(w, "\n") + } +} + +func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) { + if entering { + out(w, tableStart) + // call walker to count cells (and rows?) so format section can be produced + columns := countColumns(node) + out(w, strings.Repeat("l ", columns)+"\n") + out(w, strings.Repeat("l ", columns)+".\n") + } else { + out(w, tableEnd) + } +} + +func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) { + var ( + start, end string + ) + if node.IsHeader { + start = codespanTag + end = codespanCloseTag + } + if entering { + if node.Prev != nil && node.Prev.Type == blackfriday.TableCell { + out(w, "\t"+start) + } else { + out(w, start) + } + } else { + // need a carriage return if we are at the end of the header row + if node.IsHeader && node.Next == nil { + end = end + crTag + } + out(w, end) + } +} + +// because roff format requires knowing the column count before outputting any table +// data we need to walk a table tree and count the columns +func countColumns(node *blackfriday.Node) int { + var columns int + + node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus { + switch node.Type { + case blackfriday.TableRow: + if !entering { + return blackfriday.Terminate + } + case blackfriday.TableCell: + if entering { + columns++ + } + default: + } + return blackfriday.GoToNext + }) + return columns +} + +func out(w io.Writer, output string) { + io.WriteString(w, output) // nolint: errcheck +} + +func needsBackslash(c byte) bool { + for _, r := range []byte("-_&\\~") { + if c == r { + return true + } + } + return false +} + +func escapeSpecialChars(w io.Writer, text []byte) { + for i := 0; i < len(text); i++ { + // escape initial apostrophe or period + if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') { + out(w, "\\&") + } + + // directly copy normal characters + org := i + + for i < len(text) && !needsBackslash(text[i]) { + i++ + } + if i > org { + w.Write(text[org:i]) // nolint: errcheck + } + + // escape a character + if i >= len(text) { + break + } + + w.Write([]byte{'\\', text[i]}) // nolint: errcheck + } +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go index bfb21c818..9c549b60e 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/async/types.go @@ -49,6 +49,10 @@ const ( func (u *LaunchResultBase) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + //
AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -58,7 +62,7 @@ func (u *LaunchResultBase) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -88,6 +92,10 @@ const ( func (u *LaunchEmptyResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -97,7 +105,7 @@ func (u *LaunchEmptyResult) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth/types.go index f694c07aa..65806a23c 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth/types.go @@ -49,9 +49,9 @@ func (u *AccessError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // InvalidAccountType : Current account type cannot access the resource. - InvalidAccountType json.RawMessage `json:"invalid_account_type,omitempty"` + InvalidAccountType *InvalidAccountTypeError `json:"invalid_account_type,omitempty"` // PaperAccessDenied : Current account cannot access Paper. - PaperAccessDenied json.RawMessage `json:"paper_access_denied,omitempty"` + PaperAccessDenied *PaperAccessError `json:"paper_access_denied,omitempty"` } var w wrap var err error @@ -61,13 +61,13 @@ func (u *AccessError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "invalid_account_type": - err = json.Unmarshal(w.InvalidAccountType, &u.InvalidAccountType) + u.InvalidAccountType = w.InvalidAccountType if err != nil { return err } case "paper_access_denied": - err = json.Unmarshal(w.PaperAccessDenied, &u.PaperAccessDenied) + u.PaperAccessDenied = w.PaperAccessDenied if err != nil { return err @@ -79,6 +79,9 @@ func (u *AccessError) UnmarshalJSON(body []byte) error { // AuthError : Errors occurred during authentication. type AuthError struct { dropbox.Tagged + // MissingScope : The access token does not have the required scope to + // access the route. 
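The Dropbox SDK hunks here and in the files below are almost all one mechanical refactor: union payloads that used to be captured as json.RawMessage (or re-parsed from the whole body) and unmarshalled a second time are now typed fields on the local wrap struct, so the body is decoded exactly once and the switch merely copies the already-typed value across. A reduced sketch of the pattern; Shape and Circle are invented names, and the ".tag" key mirrors the SDK's dropbox.Tagged convention:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Circle stands in for a concrete variant payload such as TokenScopeError.
type Circle struct {
	Radius float64 `json:"radius"`
}

// Shape mimics the SDK's tagged unions: a ".tag" discriminator plus
// one optional field per variant.
type Shape struct {
	Tag    string  `json:".tag"`
	Circle *Circle `json:"circle,omitempty"`
}

// UnmarshalJSON decodes the whole union in a single pass; the switch
// only copies the already-decoded variant across, as in the generated
// code above.
func (s *Shape) UnmarshalJSON(body []byte) error {
	type wrap struct {
		Tag    string  `json:".tag"`
		Circle *Circle `json:"circle,omitempty"`
	}
	var w wrap
	if err := json.Unmarshal(body, &w); err != nil {
		return err
	}
	s.Tag = w.Tag
	switch s.Tag {
	case "circle":
		s.Circle = w.Circle
	}
	return nil
}

func main() {
	var s Shape
	_ = json.Unmarshal([]byte(`{".tag":"circle","circle":{"radius":2}}`), &s)
	fmt.Println(s.Tag, s.Circle.Radius) // circle 2
}
```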
+ MissingScope *TokenScopeError `json:"missing_scope,omitempty"` } // Valid tag values for AuthError @@ -88,9 +91,32 @@ const ( AuthErrorInvalidSelectAdmin = "invalid_select_admin" AuthErrorUserSuspended = "user_suspended" AuthErrorExpiredAccessToken = "expired_access_token" + AuthErrorMissingScope = "missing_scope" AuthErrorOther = "other" ) +// UnmarshalJSON deserializes into a AuthError instance +func (u *AuthError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + } + var w wrap + var err error + if err = json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "missing_scope": + err = json.Unmarshal(body, &u.MissingScope) + + if err != nil { + return err + } + } + return nil +} + // InvalidAccountTypeError : has no documentation (yet) type InvalidAccountTypeError struct { dropbox.Tagged @@ -186,3 +212,16 @@ func NewTokenFromOAuth1Result(Oauth2Token string) *TokenFromOAuth1Result { s.Oauth2Token = Oauth2Token return s } + +// TokenScopeError : has no documentation (yet) +type TokenScopeError struct { + // RequiredScope : The required scope to access the route. + RequiredScope string `json:"required_scope"` +} + +// NewTokenScopeError returns a new TokenScopeError instance +func NewTokenScopeError(RequiredScope string) *TokenScopeError { + s := new(TokenScopeError) + s.RequiredScope = RequiredScope + return s +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common/types.go index e1f967e08..2b2313ae9 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common/types.go @@ -52,6 +52,14 @@ const ( func (u *PathRoot) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // Root : Paths are relative to the authenticating user's root namespace + // (This results in `PathRootError.invalid_root` if the user's root + // namespace has changed.). + Root string `json:"root,omitempty"` + // NamespaceId : Paths are relative to given namespace id (This results + // in `PathRootError.no_permission` if you don't have access to this + // namespace.). 
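With the missing_scope variant added above, a caller can tell the user exactly which OAuth scope the token lacked. A hedged sketch using only the types visible in this diff; describe is an illustrative helper, not part of the SDK, and the scope string is just an example:

```go
package main

import (
	"fmt"

	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/auth"
)

// describe shows how the new missing_scope variant decoded above
// could be consumed once an AuthError has been unmarshalled.
func describe(e *auth.AuthError) string {
	if e.Tag == auth.AuthErrorMissingScope && e.MissingScope != nil {
		return "token is missing scope " + e.MissingScope.RequiredScope
	}
	return "auth error: " + e.Tag
}

func main() {
	e := &auth.AuthError{}
	e.Tag = auth.AuthErrorMissingScope
	e.MissingScope = &auth.TokenScopeError{RequiredScope: "files.content.read"}
	fmt.Println(describe(e))
}
```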
+ NamespaceId string `json:"namespace_id,omitempty"` } var w wrap var err error @@ -61,13 +69,13 @@ func (u *PathRoot) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "root": - err = json.Unmarshal(body, &u.Root) + u.Root = w.Root if err != nil { return err } case "namespace_id": - err = json.Unmarshal(body, &u.NamespaceId) + u.NamespaceId = w.NamespaceId if err != nil { return err @@ -107,7 +115,7 @@ func (u *PathRootError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "invalid_root": - u.InvalidRoot, err = IsRootInfoFromJSON(body) + u.InvalidRoot, err = IsRootInfoFromJSON(w.InvalidRoot) if err != nil { return err @@ -161,10 +169,6 @@ const ( func (u *rootInfoUnion) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Team : has no documentation (yet) - Team json.RawMessage `json:"team,omitempty"` - // User : has no documentation (yet) - User json.RawMessage `json:"user,omitempty"` } var w wrap var err error diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/file_properties/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/file_properties/types.go index 93f945847..58b896717 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/file_properties/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/file_properties/types.go @@ -86,6 +86,8 @@ const ( func (u *TemplateError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. + TemplateNotFound string `json:"template_not_found,omitempty"` } var w wrap var err error @@ -95,7 +97,7 @@ func (u *TemplateError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err @@ -126,8 +128,10 @@ const ( func (u *PropertiesError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. + TemplateNotFound string `json:"template_not_found,omitempty"` // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -137,13 +141,13 @@ func (u *PropertiesError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -176,8 +180,10 @@ const ( func (u *InvalidPropertyGroupError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. 
+ TemplateNotFound string `json:"template_not_found,omitempty"` // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -187,13 +193,13 @@ func (u *InvalidPropertyGroupError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -227,8 +233,10 @@ const ( func (u *AddPropertiesError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. + TemplateNotFound string `json:"template_not_found,omitempty"` // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -238,13 +246,13 @@ func (u *AddPropertiesError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -388,6 +396,8 @@ const ( func (u *LookupError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // MalformedPath : has no documentation (yet) + MalformedPath string `json:"malformed_path,omitempty"` } var w wrap var err error @@ -397,7 +407,7 @@ func (u *LookupError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "malformed_path": - err = json.Unmarshal(body, &u.MalformedPath) + u.MalformedPath = w.MalformedPath if err != nil { return err @@ -428,6 +438,8 @@ const ( func (u *ModifyTemplateError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. + TemplateNotFound string `json:"template_not_found,omitempty"` } var w wrap var err error @@ -437,7 +449,7 @@ func (u *ModifyTemplateError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err @@ -522,7 +534,7 @@ func (u *PropertiesSearchError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // PropertyGroupLookup : has no documentation (yet) - PropertyGroupLookup json.RawMessage `json:"property_group_lookup,omitempty"` + PropertyGroupLookup *LookUpPropertiesError `json:"property_group_lookup,omitempty"` } var w wrap var err error @@ -532,7 +544,7 @@ func (u *PropertiesSearchError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "property_group_lookup": - err = json.Unmarshal(w.PropertyGroupLookup, &u.PropertyGroupLookup) + u.PropertyGroupLookup = w.PropertyGroupLookup if err != nil { return err @@ -580,6 +592,8 @@ const ( func (u *PropertiesSearchMode) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // FieldName : Search for a value associated with this field name. 
+ FieldName string `json:"field_name,omitempty"` } var w wrap var err error @@ -589,7 +603,7 @@ func (u *PropertiesSearchMode) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "field_name": - err = json.Unmarshal(body, &u.FieldName) + u.FieldName = w.FieldName if err != nil { return err @@ -768,10 +782,12 @@ const ( func (u *RemovePropertiesError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. + TemplateNotFound string `json:"template_not_found,omitempty"` // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` // PropertyGroupLookup : has no documentation (yet) - PropertyGroupLookup json.RawMessage `json:"property_group_lookup,omitempty"` + PropertyGroupLookup *LookUpPropertiesError `json:"property_group_lookup,omitempty"` } var w wrap var err error @@ -781,19 +797,19 @@ func (u *RemovePropertiesError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err } case "property_group_lookup": - err = json.Unmarshal(w.PropertyGroupLookup, &u.PropertyGroupLookup) + u.PropertyGroupLookup = w.PropertyGroupLookup if err != nil { return err @@ -836,7 +852,7 @@ func (u *TemplateFilterBase) UnmarshalJSON(body []byte) error { dropbox.Tagged // FilterSome : Only templates with an ID in the supplied list will be // returned (a subset of templates will be returned). - FilterSome json.RawMessage `json:"filter_some,omitempty"` + FilterSome []string `json:"filter_some,omitempty"` } var w wrap var err error @@ -846,7 +862,7 @@ func (u *TemplateFilterBase) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "filter_some": - err = json.Unmarshal(body, &u.FilterSome) + u.FilterSome = w.FilterSome if err != nil { return err @@ -876,7 +892,7 @@ func (u *TemplateFilter) UnmarshalJSON(body []byte) error { dropbox.Tagged // FilterSome : Only templates with an ID in the supplied list will be // returned (a subset of templates will be returned). - FilterSome json.RawMessage `json:"filter_some,omitempty"` + FilterSome []string `json:"filter_some,omitempty"` } var w wrap var err error @@ -886,7 +902,7 @@ func (u *TemplateFilter) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "filter_some": - err = json.Unmarshal(body, &u.FilterSome) + u.FilterSome = w.FilterSome if err != nil { return err @@ -950,10 +966,12 @@ const ( func (u *UpdatePropertiesError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TemplateNotFound : Template does not exist for the given identifier. 
+ TemplateNotFound string `json:"template_not_found,omitempty"` // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` // PropertyGroupLookup : has no documentation (yet) - PropertyGroupLookup json.RawMessage `json:"property_group_lookup,omitempty"` + PropertyGroupLookup *LookUpPropertiesError `json:"property_group_lookup,omitempty"` } var w wrap var err error @@ -963,19 +981,19 @@ func (u *UpdatePropertiesError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "template_not_found": - err = json.Unmarshal(body, &u.TemplateNotFound) + u.TemplateNotFound = w.TemplateNotFound if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err } case "property_group_lookup": - err = json.Unmarshal(w.PropertyGroupLookup, &u.PropertyGroupLookup) + u.PropertyGroupLookup = w.PropertyGroupLookup if err != nil { return err diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go index 828fb7285..c211a3832 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/client.go @@ -57,7 +57,7 @@ type Client interface { Copy(arg *RelocationArg) (res IsMetadata, err error) // CopyBatch : Copy multiple files or folders to different locations at once // in the user's Dropbox. This route will replace `copyBatch`. The main - // difference is this route will return stutus for each entry, while + // difference is this route will return status for each entry, while // `copyBatch` raises failure if any entry fails. This route will either // finish synchronously, or return a job ID and do the async copy job in // background. Please use `copyBatchCheck` to check the job status. @@ -130,19 +130,23 @@ type Client interface { // total files. The input cannot be a single file. Any single file must be // less than 4GB in size. DownloadZip(arg *DownloadZipArg) (res *DownloadZipResult, content io.ReadCloser, err error) + // Export : Export a file from a user's Dropbox. This route only supports + // exporting files that cannot be downloaded directly and whose + // `ExportResult.file_metadata` has `ExportInfo.export_as` populated. + Export(arg *ExportArg) (res *ExportResult, content io.ReadCloser, err error) // GetMetadata : Returns the metadata for a file or folder. Note: Metadata // for the root folder is unsupported. GetMetadata(arg *GetMetadataArg) (res IsMetadata, err error) // GetPreview : Get a preview for a file. Currently, PDF previews are // generated for files with the following extensions: .ai, .doc, .docm, - // .docx, .eps, .odp, .odt, .pps, .ppsm, .ppsx, .ppt, .pptm, .pptx, .rtf. - // HTML previews are generated for files with the following extensions: - // .csv, .ods, .xls, .xlsm, .xlsx. Other formats will return an unsupported - // extension error. + // .docx, .eps, .gdoc, .gslides, .odp, .odt, .pps, .ppsm, .ppsx, .ppt, + // .pptm, .pptx, .rtf. HTML previews are generated for files with the + // following extensions: .csv, .ods, .xls, .xlsm, .gsheet, .xlsx. Other + // formats will return an unsupported extension error. GetPreview(arg *PreviewArg) (res *FileMetadata, content io.ReadCloser, err error) // GetTemporaryLink : Get a temporary link to stream content of a file. This - // link will expire in four hours and afterwards you will get 410 Gone. 
So - // this URL should not be used to display content directly in the browser. + // link will expire in four hours and afterwards you will get 410 Gone. This + // URL should not be used to display content directly in the browser. The // Content-Type of the link is determined automatically by the file's mime // type. GetTemporaryLink(arg *GetTemporaryLinkArg) (res *GetTemporaryLinkResult, err error) @@ -246,7 +250,7 @@ type Client interface { Move(arg *RelocationArg) (res IsMetadata, err error) // MoveBatch : Move multiple files or folders to different locations at once // in the user's Dropbox. This route will replace `moveBatch`. The main - // difference is this route will return stutus for each entry, while + // difference is this route will return status for each entry, while // `moveBatch` raises failure if any entry fails. This route will either // finish synchronously, or return a job ID and do the async move job in // background. Please use `moveBatchCheck` to check the job status. @@ -477,7 +481,7 @@ func (dbx *apiImpl) AlphaUpload(arg *CommitInfoWithProperties, content io.Reader headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -1636,7 +1640,7 @@ func (dbx *apiImpl) Download(arg *DownloadArg) (res *FileMetadata, content io.Re } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } for k, v := range arg.ExtraHeaders { headers[k] = v @@ -1706,7 +1710,7 @@ func (dbx *apiImpl) DownloadZip(arg *DownloadZipArg) (res *DownloadZipResult, co } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -1757,6 +1761,73 @@ func (dbx *apiImpl) DownloadZip(arg *DownloadZipArg) (res *DownloadZipResult, co return } +//ExportAPIError is an error-wrapper for the export route +type ExportAPIError struct { + dropbox.APIError + EndpointError *ExportError `json:"error"` +} + +func (dbx *apiImpl) Export(arg *ExportArg) (res *ExportResult, content io.ReadCloser, err error) { + cli := dbx.Client + + dbx.Config.LogDebug("arg: %v", arg) + b, err := json.Marshal(arg) + if err != nil { + return + } + + headers := map[string]string{ + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), + } + if dbx.Config.AsMemberID != "" { + headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID + } + + req, err := (*dropbox.Context)(dbx).NewRequest("content", "download", true, "files", "export", headers, nil) + if err != nil { + return + } + dbx.Config.LogInfo("req: %v", req) + + resp, err := cli.Do(req) + if err != nil { + return + } + + dbx.Config.LogInfo("resp: %v", resp) + body := []byte(resp.Header.Get("Dropbox-API-Result")) + content = resp.Body + dbx.Config.LogDebug("body: %s", body) + if resp.StatusCode == http.StatusOK { + err = json.Unmarshal(body, &res) + if err != nil { + return + } + + return + } + if resp.StatusCode == http.StatusConflict { + defer resp.Body.Close() + body, err = ioutil.ReadAll(resp.Body) + if err != nil { + return + } + var apiError ExportAPIError + err = json.Unmarshal(body, &apiError) + if err != nil { + return + } + err = apiError + return + } + err = auth.HandleCommonAuthErrors(dbx.Config, resp, body) + if err != nil { + return + } + err = 
dropbox.HandleCommonAPIErrors(dbx.Config, resp, body) + return +} + //GetMetadataAPIError is an error-wrapper for the get_metadata route type GetMetadataAPIError struct { dropbox.APIError @@ -1850,7 +1921,7 @@ func (dbx *apiImpl) GetPreview(arg *PreviewArg) (res *FileMetadata, content io.R } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -2049,7 +2120,7 @@ func (dbx *apiImpl) GetThumbnail(arg *ThumbnailArg) (res *FileMetadata, content } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -2116,7 +2187,7 @@ func (dbx *apiImpl) GetThumbnailBatch(arg *GetThumbnailBatchArg) (res *GetThumbn } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -3625,7 +3696,7 @@ func (dbx *apiImpl) Upload(arg *CommitInfo, content io.Reader) (res *FileMetadat headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -3692,7 +3763,7 @@ func (dbx *apiImpl) UploadSessionAppendV2(arg *UploadSessionAppendArg, content i headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -3757,7 +3828,7 @@ func (dbx *apiImpl) UploadSessionAppend(arg *UploadSessionCursor, content io.Rea headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -3819,7 +3890,7 @@ func (dbx *apiImpl) UploadSessionFinish(arg *UploadSessionFinishArg, content io. 
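The repeated change from string(b) to dropbox.HTTPHeaderSafeJSON(b) throughout this file exists because the Dropbox-API-Arg value travels in an HTTP header, and header values should stay ASCII even though the JSON-encoded arguments (for example accented file names) need not be. A sketch of what such a helper has to do; this is a re-statement for illustration, not the vendored implementation (runes above U+FFFF would additionally need surrogate-pair escapes):

```go
package main

import (
	"fmt"
	"strings"
)

// headerSafeJSON rewrites any rune >= U+007F as a \uXXXX JSON escape,
// which decodes to the same document but keeps the header ASCII-only.
func headerSafeJSON(b []byte) string {
	var s strings.Builder
	for _, r := range string(b) {
		if r >= 0x7f {
			fmt.Fprintf(&s, `\u%04x`, r)
		} else {
			s.WriteRune(r)
		}
	}
	return s.String()
}

func main() {
	arg := []byte(`{"path":"/Résumé.pdf"}`)
	fmt.Println(headerSafeJSON(arg)) // {"path":"/R\u00e9sum\u00e9.pdf"}
}
```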
headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID @@ -4018,7 +4089,7 @@ func (dbx *apiImpl) UploadSessionStart(arg *UploadSessionStartArg, content io.Re headers := map[string]string{ "Content-Type": "application/octet-stream", - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go index fc87512f4..7103ffe04 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files/types.go @@ -95,7 +95,7 @@ func (u *GetMetadataError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -105,7 +105,7 @@ func (u *GetMetadataError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -134,9 +134,9 @@ func (u *AlphaGetMetadataError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` // PropertiesError : has no documentation (yet) - PropertiesError json.RawMessage `json:"properties_error,omitempty"` + PropertiesError *file_properties.LookUpPropertiesError `json:"properties_error,omitempty"` } var w wrap var err error @@ -146,13 +146,13 @@ func (u *AlphaGetMetadataError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err } case "properties_error": - err = json.Unmarshal(w.PropertiesError, &u.PropertiesError) + u.PropertiesError = w.PropertiesError if err != nil { return err @@ -319,10 +319,8 @@ const ( func (u *CreateFolderBatchJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The batch create folder has finished. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : The batch create folder has failed. - Failed json.RawMessage `json:"failed,omitempty"` + Failed *CreateFolderBatchError `json:"failed,omitempty"` } var w wrap var err error @@ -338,7 +336,7 @@ func (u *CreateFolderBatchJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -370,8 +368,10 @@ const ( func (u *CreateFolderBatchLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. 
+ AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -381,7 +381,7 @@ func (u *CreateFolderBatchLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -440,10 +440,8 @@ const ( func (u *CreateFolderBatchResultEntry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : has no documentation (yet) - Success json.RawMessage `json:"success,omitempty"` // Failure : has no documentation (yet) - Failure json.RawMessage `json:"failure,omitempty"` + Failure *CreateFolderEntryError `json:"failure,omitempty"` } var w wrap var err error @@ -459,7 +457,7 @@ func (u *CreateFolderBatchResultEntry) UnmarshalJSON(body []byte) error { return err } case "failure": - err = json.Unmarshal(w.Failure, &u.Failure) + u.Failure = w.Failure if err != nil { return err @@ -486,7 +484,7 @@ func (u *CreateFolderEntryError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *WriteError `json:"path,omitempty"` } var w wrap var err error @@ -496,7 +494,7 @@ func (u *CreateFolderEntryError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -535,7 +533,7 @@ func (u *CreateFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *WriteError `json:"path,omitempty"` } var w wrap var err error @@ -545,7 +543,7 @@ func (u *CreateFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -629,10 +627,8 @@ const ( func (u *DeleteBatchJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The batch delete has finished. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : The batch delete has failed. - Failed json.RawMessage `json:"failed,omitempty"` + Failed *DeleteBatchError `json:"failed,omitempty"` } var w wrap var err error @@ -648,7 +644,7 @@ func (u *DeleteBatchJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -680,8 +676,10 @@ const ( func (u *DeleteBatchLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. 
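The AsyncJobId plumbing recurring through these unions supports Dropbox's launch-then-poll convention: a batch route either completes synchronously or hands back an async_job_id to poll via the matching check route (e.g. copyBatchCheck). A self-contained sketch of that calling pattern with stand-in types; the tag strings match the constants in this diff, but launch, status and check are illustrative:

```go
package main

import (
	"errors"
	"time"
)

// launch mirrors the shape of the *Launch unions above: either the job
// completed synchronously, or an async_job_id must be polled.
type launch struct {
	Tag        string
	AsyncJobId string
}

// status mirrors the *JobStatus unions: in_progress until a terminal
// complete or failed tag appears.
type status struct {
	Tag string
}

// poll demonstrates the convention; check stands in for a *Check SDK
// route such as copyBatchCheck.
func poll(l launch, check func(jobID string) (status, error)) error {
	if l.Tag != "async_job_id" {
		return nil // completed synchronously
	}
	for {
		st, err := check(l.AsyncJobId)
		if err != nil {
			return err
		}
		switch st.Tag {
		case "in_progress":
			time.Sleep(500 * time.Millisecond)
		case "complete":
			return nil
		default:
			return errors.New("job failed: " + st.Tag)
		}
	}
}

func main() {
	n := 0
	_ = poll(launch{Tag: "async_job_id", AsyncJobId: "dbjid:demo"}, func(string) (status, error) {
		if n++; n < 3 {
			return status{Tag: "in_progress"}, nil
		}
		return status{Tag: "complete"}, nil
	})
}
```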
+ AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -691,7 +689,7 @@ func (u *DeleteBatchLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -771,10 +769,8 @@ const ( func (u *DeleteBatchResultEntry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : has no documentation (yet) - Success json.RawMessage `json:"success,omitempty"` // Failure : has no documentation (yet) - Failure json.RawMessage `json:"failure,omitempty"` + Failure *DeleteError `json:"failure,omitempty"` } var w wrap var err error @@ -790,7 +786,7 @@ func (u *DeleteBatchResultEntry) UnmarshalJSON(body []byte) error { return err } case "failure": - err = json.Unmarshal(w.Failure, &u.Failure) + u.Failure = w.Failure if err != nil { return err @@ -822,9 +818,9 @@ func (u *DeleteError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // PathLookup : has no documentation (yet) - PathLookup json.RawMessage `json:"path_lookup,omitempty"` + PathLookup *LookupError `json:"path_lookup,omitempty"` // PathWrite : has no documentation (yet) - PathWrite json.RawMessage `json:"path_write,omitempty"` + PathWrite *WriteError `json:"path_write,omitempty"` } var w wrap var err error @@ -834,13 +830,13 @@ func (u *DeleteError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path_lookup": - err = json.Unmarshal(w.PathLookup, &u.PathLookup) + u.PathLookup = w.PathLookup if err != nil { return err } case "path_write": - err = json.Unmarshal(w.PathWrite, &u.PathWrite) + u.PathWrite = w.PathWrite if err != nil { return err @@ -939,12 +935,6 @@ const ( func (u *metadataUnion) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // File : has no documentation (yet) - File json.RawMessage `json:"file,omitempty"` - // Folder : has no documentation (yet) - Folder json.RawMessage `json:"folder,omitempty"` - // Deleted : has no documentation (yet) - Deleted json.RawMessage `json:"deleted,omitempty"` } var w wrap var err error @@ -1050,8 +1040,9 @@ type DownloadError struct { // Valid tag values for DownloadError const ( - DownloadErrorPath = "path" - DownloadErrorOther = "other" + DownloadErrorPath = "path" + DownloadErrorUnsupportedFile = "unsupported_file" + DownloadErrorOther = "other" ) // UnmarshalJSON deserializes into a DownloadError instance @@ -1059,7 +1050,7 @@ func (u *DownloadError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1069,7 +1060,7 @@ func (u *DownloadError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1111,7 +1102,7 @@ func (u *DownloadZipError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1121,7 +1112,7 @@ func (u *DownloadZipError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1143,6 +1134,107 @@ func NewDownloadZipResult(Metadata *FolderMetadata) *DownloadZipResult { 
return s } +// ExportArg : has no documentation (yet) +type ExportArg struct { + // Path : The path of the file to be exported. + Path string `json:"path"` +} + +// NewExportArg returns a new ExportArg instance +func NewExportArg(Path string) *ExportArg { + s := new(ExportArg) + s.Path = Path + return s +} + +// ExportError : has no documentation (yet) +type ExportError struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` +} + +// Valid tag values for ExportError +const ( + ExportErrorPath = "path" + ExportErrorNonExportable = "non_exportable" + ExportErrorOther = "other" +) + +// UnmarshalJSON deserializes into a ExportError instance +func (u *ExportError) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Path : has no documentation (yet) + Path *LookupError `json:"path,omitempty"` + } + var w wrap + var err error + if err = json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "path": + u.Path = w.Path + + if err != nil { + return err + } + } + return nil +} + +// ExportInfo : Export information for a file. +type ExportInfo struct { + // ExportAs : Format to which the file can be exported. + ExportAs string `json:"export_as,omitempty"` +} + +// NewExportInfo returns a new ExportInfo instance +func NewExportInfo() *ExportInfo { + s := new(ExportInfo) + return s +} + +// ExportMetadata : has no documentation (yet) +type ExportMetadata struct { + // Name : The last component of the path (including extension). This never + // contains a slash. + Name string `json:"name"` + // Size : The file size in bytes. + Size uint64 `json:"size"` + // ExportHash : A hash based on the exported file content. This field can be + // used to verify data integrity. Similar to content hash. For more + // information see our `Content hash` + // page. + ExportHash string `json:"export_hash,omitempty"` +} + +// NewExportMetadata returns a new ExportMetadata instance +func NewExportMetadata(Name string, Size uint64) *ExportMetadata { + s := new(ExportMetadata) + s.Name = Name + s.Size = Size + return s +} + +// ExportResult : has no documentation (yet) +type ExportResult struct { + // ExportMetadata : Metadata for the exported version of the file. + ExportMetadata *ExportMetadata `json:"export_metadata"` + // FileMetadata : Metadata for the original file. + FileMetadata *FileMetadata `json:"file_metadata"` +} + +// NewExportResult returns a new ExportResult instance +func NewExportResult(ExportMetadata *ExportMetadata, FileMetadata *FileMetadata) *ExportResult { + s := new(ExportResult) + s.ExportMetadata = ExportMetadata + s.FileMetadata = FileMetadata + return s +} + // FileMetadata : has no documentation (yet) type FileMetadata struct { Metadata @@ -1162,12 +1254,20 @@ type FileMetadata struct { Rev string `json:"rev"` // Size : The file size in bytes. Size uint64 `json:"size"` - // MediaInfo : Additional information if the file is a photo or video. + // MediaInfo : Additional information if the file is a photo or video. This + // field will not be set on entries returned by `listFolder`, + // `listFolderContinue`, or `getThumbnailBatch`, starting December 2, 2019. MediaInfo *MediaInfo `json:"media_info,omitempty"` // SymlinkInfo : Set if this file is a symlink. SymlinkInfo *SymlinkInfo `json:"symlink_info,omitempty"` // SharingInfo : Set if this file is contained in a shared folder.
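Putting the new route together with the Export* types just added, exporting a non-downloadable file might look like the following sketch; the environment-variable token handling and the /notes.gdoc path are illustrative, while files.New, dropbox.Config and the Export signature come from this diff and the SDK:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
)

func main() {
	dbx := files.New(dropbox.Config{Token: os.Getenv("DROPBOX_TOKEN")})

	// Export streams an exported rendition of a file that cannot be
	// downloaded directly (for example a Google Doc).
	res, content, err := dbx.Export(files.NewExportArg("/notes.gdoc"))
	if err != nil {
		log.Fatal(err)
	}
	defer content.Close()

	// res.ExportMetadata describes the exported rendition,
	// res.FileMetadata the original, non-downloadable file.
	out, err := os.Create(res.ExportMetadata.Name)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, content); err != nil {
		log.Fatal(err)
	}
}
```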
SharingInfo *FileSharingInfo `json:"sharing_info,omitempty"` + // IsDownloadable : If true, file can be downloaded directly; else the file + // must be exported. + IsDownloadable bool `json:"is_downloadable"` + // ExportInfo : Information about the format this file can be exported to. + // This field must be set if `is_downloadable` is set to false. + ExportInfo *ExportInfo `json:"export_info,omitempty"` // PropertyGroups : Additional information if the file has custom properties // with the property template specified. PropertyGroups []*file_properties.PropertyGroup `json:"property_groups,omitempty"` @@ -1193,6 +1293,7 @@ func NewFileMetadata(Name string, Id string, ClientModified time.Time, ServerMod s.ServerModified = ServerModified s.Rev = Rev s.Size = Size + s.IsDownloadable = true return s } @@ -1313,7 +1414,7 @@ func (u *GetCopyReferenceError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1323,7 +1424,7 @@ func (u *GetCopyReferenceError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1401,8 +1502,10 @@ type GetTemporaryLinkError struct { // Valid tag values for GetTemporaryLinkError const ( - GetTemporaryLinkErrorPath = "path" - GetTemporaryLinkErrorOther = "other" + GetTemporaryLinkErrorPath = "path" + GetTemporaryLinkErrorEmailNotVerified = "email_not_verified" + GetTemporaryLinkErrorUnsupportedFile = "unsupported_file" + GetTemporaryLinkErrorOther = "other" ) // UnmarshalJSON deserializes into a GetTemporaryLinkError instance @@ -1410,7 +1513,7 @@ func (u *GetTemporaryLinkError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1420,7 +1523,7 @@ func (u *GetTemporaryLinkError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1552,10 +1655,8 @@ const ( func (u *GetThumbnailBatchResultEntry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : has no documentation (yet) - Success json.RawMessage `json:"success,omitempty"` // Failure : The result for this file if it was an error. - Failure json.RawMessage `json:"failure,omitempty"` + Failure *ThumbnailError `json:"failure,omitempty"` } var w wrap var err error @@ -1571,7 +1672,7 @@ func (u *GetThumbnailBatchResultEntry) UnmarshalJSON(body []byte) error { return err } case "failure": - err = json.Unmarshal(w.Failure, &u.Failure) + u.Failure = w.Failure if err != nil { return err @@ -1605,7 +1706,8 @@ type ListFolderArg struct { // all subfolders. Recursive bool `json:"recursive"` // IncludeMediaInfo : If true, `FileMetadata.media_info` is set for photo - // and video. + // and video. This parameter will no longer have an effect starting December + // 2, 2019. IncludeMediaInfo bool `json:"include_media_info"` // IncludeDeleted : If true, the results will include entries for files and // folders that used to exist but were deleted. @@ -1630,6 +1732,9 @@ type ListFolderArg struct { // `FileMetadata.property_groups` is set if there exists property data // associated with the file and each of the listed templates.
IncludePropertyGroups *file_properties.TemplateFilterBase `json:"include_property_groups,omitempty"` + // IncludeNonDownloadableFiles : If true, include files that are not + // downloadable, i.e. Google Docs. + IncludeNonDownloadableFiles bool `json:"include_non_downloadable_files"` } // NewListFolderArg returns a new ListFolderArg instance @@ -1641,6 +1746,7 @@ func NewListFolderArg(Path string) *ListFolderArg { s.IncludeDeleted = false s.IncludeHasExplicitSharedMembers = false s.IncludeMountedFolders = true + s.IncludeNonDownloadableFiles = true return s } @@ -1677,7 +1783,7 @@ func (u *ListFolderContinueError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1687,7 +1793,7 @@ func (u *ListFolderContinueError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1714,7 +1820,7 @@ func (u *ListFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1724,7 +1830,7 @@ func (u *ListFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1884,7 +1990,7 @@ func (u *ListRevisionsError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -1894,7 +2000,7 @@ func (u *ListRevisionsError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1947,12 +2053,13 @@ type LookupError struct { // Valid tag values for LookupError const ( - LookupErrorMalformedPath = "malformed_path" - LookupErrorNotFound = "not_found" - LookupErrorNotFile = "not_file" - LookupErrorNotFolder = "not_folder" - LookupErrorRestrictedContent = "restricted_content" - LookupErrorOther = "other" + LookupErrorMalformedPath = "malformed_path" + LookupErrorNotFound = "not_found" + LookupErrorNotFile = "not_file" + LookupErrorNotFolder = "not_folder" + LookupErrorRestrictedContent = "restricted_content" + LookupErrorUnsupportedContentType = "unsupported_content_type" + LookupErrorOther = "other" ) // UnmarshalJSON deserializes into a LookupError instance @@ -1963,7 +2070,7 @@ func (u *LookupError) UnmarshalJSON(body []byte) error { // format. Please refer to the `Path formats documentation` // // for more information. 
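Since NewListFolderArg now defaults IncludeNonDownloadableFiles to true, listings start including entries such as Google Docs, flagged via the new FileMetadata.IsDownloadable field and, where applicable, an attached ExportInfo. A hedged usage sketch; the token handling is illustrative, the API calls and types come from this diff and the SDK:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
)

func main() {
	dbx := files.New(dropbox.Config{Token: os.Getenv("DROPBOX_TOKEN")})

	// "" lists the root folder; non-downloadable entries are included
	// by default after this change.
	res, err := dbx.ListFolder(files.NewListFolderArg(""))
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range res.Entries {
		if f, ok := entry.(*files.FileMetadata); ok && !f.IsDownloadable && f.ExportInfo != nil {
			fmt.Printf("%s must be exported (as %s)\n", f.Name, f.ExportInfo.ExportAs)
		}
	}
}
```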
- MalformedPath json.RawMessage `json:"malformed_path,omitempty"` + MalformedPath string `json:"malformed_path,omitempty"` } var w wrap var err error @@ -1973,7 +2080,7 @@ func (u *LookupError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "malformed_path": - err = json.Unmarshal(body, &u.MalformedPath) + u.MalformedPath = w.MalformedPath if err != nil { return err @@ -2010,7 +2117,7 @@ func (u *MediaInfo) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "metadata": - u.Metadata, err = IsMediaMetadataFromJSON(body) + u.Metadata, err = IsMediaMetadataFromJSON(w.Metadata) if err != nil { return err @@ -2061,10 +2168,6 @@ const ( func (u *mediaMetadataUnion) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Photo : has no documentation (yet) - Photo json.RawMessage `json:"photo,omitempty"` - // Video : has no documentation (yet) - Video json.RawMessage `json:"video,omitempty"` } var w wrap var err error @@ -2188,7 +2291,7 @@ func (u *PreviewError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : An error occurs when downloading metadata for the file. - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -2198,7 +2301,7 @@ func (u *PreviewError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -2298,6 +2401,7 @@ const ( RelocationErrorCantTransferOwnership = "cant_transfer_ownership" RelocationErrorInsufficientQuota = "insufficient_quota" RelocationErrorInternalError = "internal_error" + RelocationErrorCantMoveSharedFolder = "cant_move_shared_folder" RelocationErrorOther = "other" ) @@ -2306,11 +2410,11 @@ func (u *RelocationError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // FromLookup : has no documentation (yet) - FromLookup json.RawMessage `json:"from_lookup,omitempty"` + FromLookup *LookupError `json:"from_lookup,omitempty"` // FromWrite : has no documentation (yet) - FromWrite json.RawMessage `json:"from_write,omitempty"` + FromWrite *WriteError `json:"from_write,omitempty"` // To : has no documentation (yet) - To json.RawMessage `json:"to,omitempty"` + To *WriteError `json:"to,omitempty"` } var w wrap var err error @@ -2320,19 +2424,19 @@ func (u *RelocationError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "from_lookup": - err = json.Unmarshal(w.FromLookup, &u.FromLookup) + u.FromLookup = w.FromLookup if err != nil { return err } case "from_write": - err = json.Unmarshal(w.FromWrite, &u.FromWrite) + u.FromWrite = w.FromWrite if err != nil { return err } case "to": - err = json.Unmarshal(w.To, &u.To) + u.To = w.To if err != nil { return err @@ -2365,6 +2469,7 @@ const ( RelocationBatchErrorCantTransferOwnership = "cant_transfer_ownership" RelocationBatchErrorInsufficientQuota = "insufficient_quota" RelocationBatchErrorInternalError = "internal_error" + RelocationBatchErrorCantMoveSharedFolder = "cant_move_shared_folder" RelocationBatchErrorOther = "other" RelocationBatchErrorTooManyWriteOperations = "too_many_write_operations" ) @@ -2374,11 +2479,11 @@ func (u *RelocationBatchError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // FromLookup : has no documentation (yet) - FromLookup json.RawMessage `json:"from_lookup,omitempty"` + FromLookup *LookupError `json:"from_lookup,omitempty"` // FromWrite : has no documentation (yet) - FromWrite 
json.RawMessage `json:"from_write,omitempty"` + FromWrite *WriteError `json:"from_write,omitempty"` // To : has no documentation (yet) - To json.RawMessage `json:"to,omitempty"` + To *WriteError `json:"to,omitempty"` } var w wrap var err error @@ -2388,19 +2493,19 @@ func (u *RelocationBatchError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "from_lookup": - err = json.Unmarshal(w.FromLookup, &u.FromLookup) + u.FromLookup = w.FromLookup if err != nil { return err } case "from_write": - err = json.Unmarshal(w.FromWrite, &u.FromWrite) + u.FromWrite = w.FromWrite if err != nil { return err } case "to": - err = json.Unmarshal(w.To, &u.To) + u.To = w.To if err != nil { return err @@ -2429,7 +2534,7 @@ func (u *RelocationBatchErrorEntry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // RelocationError : User errors that retry won't help. - RelocationError json.RawMessage `json:"relocation_error,omitempty"` + RelocationError *RelocationError `json:"relocation_error,omitempty"` } var w wrap var err error @@ -2439,7 +2544,7 @@ func (u *RelocationBatchErrorEntry) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "relocation_error": - err = json.Unmarshal(w.RelocationError, &u.RelocationError) + u.RelocationError = w.RelocationError if err != nil { return err @@ -2468,10 +2573,8 @@ const ( func (u *RelocationBatchJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The copy or move batch job has finished. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : The copy or move batch job has failed with exception. - Failed json.RawMessage `json:"failed,omitempty"` + Failed *RelocationBatchError `json:"failed,omitempty"` } var w wrap var err error @@ -2487,7 +2590,7 @@ func (u *RelocationBatchJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -2519,8 +2622,10 @@ const ( func (u *RelocationBatchLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. 
+ AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -2530,7 +2635,7 @@ func (u *RelocationBatchLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -2613,7 +2718,7 @@ func (u *RelocationBatchResultEntry) UnmarshalJSON(body []byte) error { // Success : has no documentation (yet) Success json.RawMessage `json:"success,omitempty"` // Failure : has no documentation (yet) - Failure json.RawMessage `json:"failure,omitempty"` + Failure *RelocationBatchErrorEntry `json:"failure,omitempty"` } var w wrap var err error @@ -2623,13 +2728,13 @@ func (u *RelocationBatchResultEntry) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "success": - u.Success, err = IsMetadataFromJSON(body) + u.Success, err = IsMetadataFromJSON(w.Success) if err != nil { return err } case "failure": - err = json.Unmarshal(w.Failure, &u.Failure) + u.Failure = w.Failure if err != nil { return err @@ -2638,8 +2743,9 @@ func (u *RelocationBatchResultEntry) UnmarshalJSON(body []byte) error { return nil } -// RelocationBatchV2JobStatus : Result returned by `copyBatch` or `moveBatch` -// that may either launch an asynchronous job or complete synchronously. +// RelocationBatchV2JobStatus : Result returned by `copyBatchCheck` or +// `moveBatchCheck` that may either be in progress or completed with result for +// each entry. type RelocationBatchV2JobStatus struct { dropbox.Tagged // Complete : The copy or move batch job has finished. @@ -2656,8 +2762,6 @@ const ( func (u *RelocationBatchV2JobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The copy or move batch job has finished. - Complete json.RawMessage `json:"complete,omitempty"` } var w wrap var err error @@ -2698,8 +2802,10 @@ const ( func (u *RelocationBatchV2Launch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -2709,7 +2815,7 @@ func (u *RelocationBatchV2Launch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -2810,10 +2916,10 @@ func (u *RestoreError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // PathLookup : An error occurs when downloading metadata for the file. - PathLookup json.RawMessage `json:"path_lookup,omitempty"` + PathLookup *LookupError `json:"path_lookup,omitempty"` // PathWrite : An error occurs when trying to restore the file to that // path. 
- PathWrite json.RawMessage `json:"path_write,omitempty"` + PathWrite *WriteError `json:"path_write,omitempty"` } var w wrap var err error @@ -2823,13 +2929,13 @@ func (u *RestoreError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path_lookup": - err = json.Unmarshal(w.PathLookup, &u.PathLookup) + u.PathLookup = w.PathLookup if err != nil { return err } case "path_write": - err = json.Unmarshal(w.PathWrite, &u.PathWrite) + u.PathWrite = w.PathWrite if err != nil { return err @@ -2876,7 +2982,7 @@ func (u *SaveCopyReferenceError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *WriteError `json:"path,omitempty"` } var w wrap var err error @@ -2886,7 +2992,7 @@ func (u *SaveCopyReferenceError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -2965,7 +3071,7 @@ func (u *SaveUrlError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *WriteError `json:"path,omitempty"` } var w wrap var err error @@ -2975,7 +3081,7 @@ func (u *SaveUrlError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -3004,10 +3110,8 @@ const ( func (u *SaveUrlJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : Metadata of the file where the URL is saved to. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : has no documentation (yet) - Failed json.RawMessage `json:"failed,omitempty"` + Failed *SaveUrlError `json:"failed,omitempty"` } var w wrap var err error @@ -3023,7 +3127,7 @@ func (u *SaveUrlJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -3053,8 +3157,10 @@ const ( func (u *SaveUrlResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : Metadata of the file where the URL is saved to. - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. 
+ AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -3064,7 +3170,7 @@ func (u *SaveUrlResult) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -3127,7 +3233,7 @@ func (u *SearchError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -3137,7 +3243,7 @@ func (u *SearchError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -3301,7 +3407,7 @@ func (u *SyncSettingsError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -3311,7 +3417,7 @@ func (u *SyncSettingsError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -3364,7 +3470,7 @@ func (u *ThumbnailError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : An error occurs when downloading metadata for the image. - Path json.RawMessage `json:"path,omitempty"` + Path *LookupError `json:"path,omitempty"` } var w wrap var err error @@ -3374,7 +3480,7 @@ func (u *ThumbnailError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -3445,11 +3551,9 @@ const ( func (u *UploadError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Path : Unable to save the uploaded contents to a file. - Path json.RawMessage `json:"path,omitempty"` // PropertiesError : The supplied property group is invalid. The file // has uploaded without property groups. - PropertiesError json.RawMessage `json:"properties_error,omitempty"` + PropertiesError *file_properties.InvalidPropertyGroupError `json:"properties_error,omitempty"` } var w wrap var err error @@ -3465,7 +3569,7 @@ func (u *UploadError) UnmarshalJSON(body []byte) error { return err } case "properties_error": - err = json.Unmarshal(w.PropertiesError, &u.PropertiesError) + u.PropertiesError = w.PropertiesError if err != nil { return err @@ -3495,11 +3599,9 @@ const ( func (u *UploadErrorWithProperties) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Path : Unable to save the uploaded contents to a file. - Path json.RawMessage `json:"path,omitempty"` // PropertiesError : The supplied property group is invalid. The file // has uploaded without property groups. 
- PropertiesError json.RawMessage `json:"properties_error,omitempty"` + PropertiesError *file_properties.InvalidPropertyGroupError `json:"properties_error,omitempty"` } var w wrap var err error @@ -3515,7 +3617,7 @@ func (u *UploadErrorWithProperties) UnmarshalJSON(body []byte) error { return err } case "properties_error": - err = json.Unmarshal(w.PropertiesError, &u.PropertiesError) + u.PropertiesError = w.PropertiesError if err != nil { return err @@ -3606,8 +3708,6 @@ const ( func (u *UploadSessionFinishBatchJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The `uploadSessionFinishBatch` has finished. - Complete json.RawMessage `json:"complete,omitempty"` } var w wrap var err error @@ -3650,8 +3750,10 @@ const ( func (u *UploadSessionFinishBatchLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -3661,7 +3763,7 @@ func (u *UploadSessionFinishBatchLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -3709,10 +3811,8 @@ const ( func (u *UploadSessionFinishBatchResultEntry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : has no documentation (yet) - Success json.RawMessage `json:"success,omitempty"` // Failure : has no documentation (yet) - Failure json.RawMessage `json:"failure,omitempty"` + Failure *UploadSessionFinishError `json:"failure,omitempty"` } var w wrap var err error @@ -3728,7 +3828,7 @@ func (u *UploadSessionFinishBatchResultEntry) UnmarshalJSON(body []byte) error { return err } case "failure": - err = json.Unmarshal(w.Failure, &u.Failure) + u.Failure = w.Failure if err != nil { return err @@ -3768,14 +3868,14 @@ func (u *UploadSessionFinishError) UnmarshalJSON(body []byte) error { dropbox.Tagged // LookupFailed : The session arguments are incorrect; the value // explains the reason. - LookupFailed json.RawMessage `json:"lookup_failed,omitempty"` + LookupFailed *UploadSessionLookupError `json:"lookup_failed,omitempty"` // Path : Unable to save the uploaded contents to a file. Data has // already been appended to the upload session. Please retry with empty // data body and updated offset. - Path json.RawMessage `json:"path,omitempty"` + Path *WriteError `json:"path,omitempty"` // PropertiesError : The supplied property group is invalid. The file // has uploaded without property groups. 
- PropertiesError json.RawMessage `json:"properties_error,omitempty"` + PropertiesError *file_properties.InvalidPropertyGroupError `json:"properties_error,omitempty"` } var w wrap var err error @@ -3785,19 +3885,19 @@ func (u *UploadSessionFinishError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "lookup_failed": - err = json.Unmarshal(w.LookupFailed, &u.LookupFailed) + u.LookupFailed = w.LookupFailed if err != nil { return err } case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err } case "properties_error": - err = json.Unmarshal(w.PropertiesError, &u.PropertiesError) + u.PropertiesError = w.PropertiesError if err != nil { return err @@ -3830,11 +3930,6 @@ const ( func (u *UploadSessionLookupError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // IncorrectOffset : The specified offset was incorrect. See the value - // for the correct offset. This error may occur when a previous request - // was received and processed successfully but the client did not - // receive the response, e.g. due to a network error. - IncorrectOffset json.RawMessage `json:"incorrect_offset,omitempty"` } var w wrap var err error @@ -3972,10 +4067,10 @@ func (u *WriteError) UnmarshalJSON(body []byte) error { // format. Please refer to the `Path formats documentation` // // for more information. - MalformedPath json.RawMessage `json:"malformed_path,omitempty"` + MalformedPath string `json:"malformed_path,omitempty"` // Conflict : Couldn't write to the target path because there was // something in the way. - Conflict json.RawMessage `json:"conflict,omitempty"` + Conflict *WriteConflictError `json:"conflict,omitempty"` } var w wrap var err error @@ -3985,13 +4080,13 @@ func (u *WriteError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "malformed_path": - err = json.Unmarshal(body, &u.MalformedPath) + u.MalformedPath = w.MalformedPath if err != nil { return err } case "conflict": - err = json.Unmarshal(w.Conflict, &u.Conflict) + u.Conflict = w.Conflict if err != nil { return err @@ -4029,6 +4124,12 @@ const ( func (u *WriteMode) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // Update : Overwrite if the given "rev" matches the existing file's + // "rev". The autorename strategy is to append the string "conflicted + // copy" to the file name. For example, "document.txt" might become + // "document (conflicted copy).txt" or "document (Panda's conflicted + // copy).txt". 
+ Update string `json:"update,omitempty"` } var w wrap var err error @@ -4038,7 +4139,7 @@ func (u *WriteMode) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "update": - err = json.Unmarshal(body, &u.Update) + u.Update = w.Update if err != nil { return err diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go index 6744fd4ce..46c49665b 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sdk.go @@ -21,6 +21,7 @@ package dropbox import ( + "bytes" "encoding/json" "fmt" "io" @@ -37,8 +38,8 @@ const ( hostAPI = "api" hostContent = "content" hostNotify = "notify" - sdkVersion = "5.4.0" - specVersion = "097e9ba" + sdkVersion = "5.6.0" + specVersion = "0e697d7" ) // Version returns the current SDK version and API Spec version @@ -221,3 +222,20 @@ func HandleCommonAPIErrors(c Config, resp *http.Response, body []byte) error { } return apiError } + +// HTTPHeaderSafeJSON encodes the JSON passed in b []byte in +// a way that is suitable for HTTP headers. + +// See: https://www.dropbox.com/developers/reference/json-encoding +func HTTPHeaderSafeJSON(b []byte) string { + var s bytes.Buffer + s.Grow(len(b)) + for _, r := range string(b) { + if r >= 0x007f { + fmt.Fprintf(&s, "\\u%04x", r) + } else { + s.WriteRune(r) + } + } + return s.String() +} diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/seen_state/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/seen_state/types.go index 1afac6e83..be6120a33 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/seen_state/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/seen_state/types.go @@ -30,9 +30,12 @@ type PlatformType struct { // Valid tag values for PlatformType const ( - PlatformTypeWeb = "web" - PlatformTypeMobile = "mobile" - PlatformTypeDesktop = "desktop" - PlatformTypeUnknown = "unknown" - PlatformTypeOther = "other" + PlatformTypeWeb = "web" + PlatformTypeDesktop = "desktop" + PlatformTypeMobileIos = "mobile_ios" + PlatformTypeMobileAndroid = "mobile_android" + PlatformTypeApi = "api" + PlatformTypeUnknown = "unknown" + PlatformTypeMobile = "mobile" + PlatformTypeOther = "other" ) diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/client.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/client.go index 4112ccdba..41f6f284c 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/client.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/client.go @@ -966,7 +966,7 @@ func (dbx *apiImpl) GetSharedLinkFile(arg *GetSharedLinkMetadataArg) (res IsShar } headers := map[string]string{ - "Dropbox-API-Arg": string(b), + "Dropbox-API-Arg": dropbox.HTTPHeaderSafeJSON(b), } if dbx.Config.AsMemberID != "" { headers["Dropbox-API-Select-User"] = dbx.Config.AsMemberID
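The new HTTPHeaderSafeJSON helper exists because content endpoints such as GetSharedLinkFile ship their request argument in the Dropbox-API-Arg HTTP header, where raw non-ASCII bytes are not safe; following the linked Dropbox JSON-encoding note, every rune at or above 0x7F is re-emitted as a JSON \uXXXX escape so the header value stays pure ASCII. A standalone copy to show the behaviour:

package main

import (
	"bytes"
	"fmt"
)

// httpHeaderSafeJSON re-emits every rune >= 0x7F as a JSON \uXXXX
// escape so the result is plain ASCII, safe for an HTTP header value.
func httpHeaderSafeJSON(b []byte) string {
	var s bytes.Buffer
	s.Grow(len(b))
	for _, r := range string(b) {
		if r >= 0x007f {
			fmt.Fprintf(&s, "\\u%04x", r)
		} else {
			s.WriteRune(r)
		}
	}
	return s.String()
}

func main() {
	fmt.Println(httpHeaderSafeJSON([]byte(`{"path":"/Résumé.pdf"}`)))
	// Output: {"path":"/R\u00e9sum\u00e9.pdf"}
}

One caveat visible in the implementation: %04x pads but never truncates, so a rune outside the Basic Multilingual Plane comes out as \u followed by five or six hex digits rather than as a UTF-16 surrogate pair, which a strict JSON parser would reject.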
diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/types.go index 2ae5b47af..4e84fb522 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing/types.go @@ -128,9 +128,9 @@ func (u *AddFileMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -140,13 +140,13 @@ func (u *AddFileMemberError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -198,6 +198,7 @@ type AddFolderMemberError struct { const ( AddFolderMemberErrorAccessError = "access_error" AddFolderMemberErrorEmailUnverified = "email_unverified" + AddFolderMemberErrorBannedMember = "banned_member" AddFolderMemberErrorBadMember = "bad_member" AddFolderMemberErrorCantShareOutsideTeam = "cant_share_outside_team" AddFolderMemberErrorTooManyMembers = "too_many_members" @@ -215,10 +216,15 @@ func (u *AddFolderMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : Unable to access shared folder. - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` // BadMember : `AddFolderMemberArg.members` contains a bad invitation // recipient. - BadMember json.RawMessage `json:"bad_member,omitempty"` + BadMember *AddMemberSelectorError `json:"bad_member,omitempty"` + // TooManyMembers : The value is the member limit that was reached. + TooManyMembers uint64 `json:"too_many_members,omitempty"` + // TooManyPendingInvites : The value is the pending invite limit that + // was reached. + TooManyPendingInvites uint64 `json:"too_many_pending_invites,omitempty"` } var w wrap var err error @@ -228,25 +234,25 @@ func (u *AddFolderMemberError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "bad_member": - err = json.Unmarshal(w.BadMember, &u.BadMember) + u.BadMember = w.BadMember if err != nil { return err } case "too_many_members": - err = json.Unmarshal(body, &u.TooManyMembers) + u.TooManyMembers = w.TooManyMembers if err != nil { return err } case "too_many_pending_invites": - err = json.Unmarshal(body, &u.TooManyPendingInvites) + u.TooManyPendingInvites = w.TooManyPendingInvites if err != nil { return err @@ -301,6 +307,14 @@ const ( func (u *AddMemberSelectorError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // InvalidDropboxId : The value is the ID that could not be identified. + InvalidDropboxId string `json:"invalid_dropbox_id,omitempty"` + // InvalidEmail : The value is the e-mail address that is malformed. + InvalidEmail string `json:"invalid_email,omitempty"` + // UnverifiedDropboxId : The value is the ID of the Dropbox user with an + // unverified e-mail address. Invite unverified users by e-mail address + // instead of by their Dropbox ID.
+ UnverifiedDropboxId string `json:"unverified_dropbox_id,omitempty"` } var w wrap var err error @@ -310,19 +324,19 @@ func (u *AddMemberSelectorError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "invalid_dropbox_id": - err = json.Unmarshal(body, &u.InvalidDropboxId) + u.InvalidDropboxId = w.InvalidDropboxId if err != nil { return err } case "invalid_email": - err = json.Unmarshal(body, &u.InvalidEmail) + u.InvalidEmail = w.InvalidEmail if err != nil { return err } case "unverified_dropbox_id": - err = json.Unmarshal(body, &u.UnverifiedDropboxId) + u.UnverifiedDropboxId = w.UnverifiedDropboxId if err != nil { return err @@ -449,10 +463,6 @@ const ( func (u *linkMetadataUnion) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` - // Collection : has no documentation (yet) - Collection json.RawMessage `json:"collection,omitempty"` } var w wrap var err error @@ -545,7 +555,7 @@ func (u *CreateSharedLinkError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *files.LookupError `json:"path,omitempty"` } var w wrap var err error @@ -555,7 +565,7 @@ func (u *CreateSharedLinkError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -584,6 +594,10 @@ type CreateSharedLinkWithSettingsError struct { dropbox.Tagged // Path : has no documentation (yet) Path *files.LookupError `json:"path,omitempty"` + // SharedLinkAlreadyExists : The shared link already exists. You can call + // `listSharedLinks` to get the existing link, or use the provided metadata + // if it is returned. + SharedLinkAlreadyExists *SharedLinkAlreadyExistsMetadata `json:"shared_link_already_exists,omitempty"` // SettingsError : There is an error with the given settings. SettingsError *SharedLinkSettingsError `json:"settings_error,omitempty"` } @@ -602,9 +616,13 @@ func (u *CreateSharedLinkWithSettingsError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *files.LookupError `json:"path,omitempty"` + // SharedLinkAlreadyExists : The shared link already exists. You can + // call `listSharedLinks` to get the existing link, or use the provided + // metadata if it is returned. + SharedLinkAlreadyExists *SharedLinkAlreadyExistsMetadata `json:"shared_link_already_exists,omitempty"` // SettingsError : There is an error with the given settings. 
- SettingsError json.RawMessage `json:"settings_error,omitempty"` + SettingsError *SharedLinkSettingsError `json:"settings_error,omitempty"` } var w wrap var err error @@ -614,13 +632,19 @@ func (u *CreateSharedLinkWithSettingsError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path + + if err != nil { + return err + } + case "shared_link_already_exists": + u.SharedLinkAlreadyExists = w.SharedLinkAlreadyExists if err != nil { return err } case "settings_error": - err = json.Unmarshal(w.SettingsError, &u.SettingsError) + u.SettingsError = w.SettingsError if err != nil { return err @@ -698,6 +722,8 @@ const ( FileActionRelinquishMembership = "relinquish_membership" FileActionShareLink = "share_link" FileActionCreateLink = "create_link" + FileActionCreateViewLink = "create_view_link" + FileActionCreateEditLink = "create_edit_link" FileActionOther = "other" ) @@ -726,6 +752,14 @@ const ( func (u *FileErrorResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // FileNotFoundError : File specified by id was not found. + FileNotFoundError string `json:"file_not_found_error,omitempty"` + // InvalidFileActionError : User does not have permission to take the + // specified action on the file. + InvalidFileActionError string `json:"invalid_file_action_error,omitempty"` + // PermissionDeniedError : User does not have permission to access file + // specified by file.Id. + PermissionDeniedError string `json:"permission_denied_error,omitempty"` } var w wrap var err error @@ -735,19 +769,19 @@ func (u *FileErrorResult) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "file_not_found_error": - err = json.Unmarshal(body, &u.FileNotFoundError) + u.FileNotFoundError = w.FileNotFoundError if err != nil { return err } case "invalid_file_action_error": - err = json.Unmarshal(body, &u.InvalidFileActionError) + u.InvalidFileActionError = w.InvalidFileActionError if err != nil { return err } case "permission_denied_error": - err = json.Unmarshal(body, &u.PermissionDeniedError) + u.PermissionDeniedError = w.PermissionDeniedError if err != nil { return err @@ -817,10 +851,6 @@ const ( func (u *sharedLinkMetadataUnion) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // File : has no documentation (yet) - File json.RawMessage `json:"file,omitempty"` - // Folder : has no documentation (yet) - Folder json.RawMessage `json:"folder,omitempty"` } var w wrap var err error @@ -920,11 +950,7 @@ func (u *FileMemberActionError) UnmarshalJSON(body []byte) error { dropbox.Tagged // AccessError : Specified file was invalid or user does not have // access. - AccessError json.RawMessage `json:"access_error,omitempty"` - // NoExplicitAccess : The action cannot be completed because the target - // member does not have explicit access to the file. The return value is - // the access that the member has to the file from a parent folder. 
- NoExplicitAccess json.RawMessage `json:"no_explicit_access,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -934,7 +960,7 @@ func (u *FileMemberActionError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -972,9 +998,9 @@ func (u *FileMemberActionIndividualResult) UnmarshalJSON(body []byte) error { // Success : Member was successfully removed from this file. If // AccessLevel is given, the member still has access via a parent shared // folder. - Success json.RawMessage `json:"success,omitempty"` + Success *AccessLevel `json:"success,omitempty"` // MemberError : User was not able to perform this action. - MemberError json.RawMessage `json:"member_error,omitempty"` + MemberError *FileMemberActionError `json:"member_error,omitempty"` } var w wrap var err error @@ -984,13 +1010,13 @@ func (u *FileMemberActionIndividualResult) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "success": - err = json.Unmarshal(body, &u.Success) + u.Success = w.Success if err != nil { return err } case "member_error": - err = json.Unmarshal(w.MemberError, &u.MemberError) + u.MemberError = w.MemberError if err != nil { return err @@ -1036,10 +1062,8 @@ const ( func (u *FileMemberRemoveActionResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : Member was successfully removed from this file. - Success json.RawMessage `json:"success,omitempty"` // MemberError : User was not able to remove this member. - MemberError json.RawMessage `json:"member_error,omitempty"` + MemberError *FileMemberActionError `json:"member_error,omitempty"` } var w wrap var err error @@ -1055,7 +1079,7 @@ func (u *FileMemberRemoveActionResult) UnmarshalJSON(body []byte) error { return err } case "member_error": - err = json.Unmarshal(w.MemberError, &u.MemberError) + u.MemberError = w.MemberError if err != nil { return err @@ -1245,9 +1269,9 @@ func (u *GetFileMetadataError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -1257,13 +1281,13 @@ func (u *GetFileMetadataError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -1292,10 +1316,8 @@ const ( func (u *GetFileMetadataIndividualResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Metadata : The result for this file if it was successful. - Metadata json.RawMessage `json:"metadata,omitempty"` // AccessError : The result for this file if it was an error. 
- AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -1311,7 +1333,7 @@ func (u *GetFileMetadataIndividualResult) UnmarshalJSON(body []byte) error { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -1415,7 +1437,7 @@ func (u *GetSharedLinksError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path string `json:"path,omitempty"` } var w wrap var err error @@ -1425,7 +1447,7 @@ func (u *GetSharedLinksError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(body, &u.Path) + u.Path = w.Path if err != nil { return err @@ -1591,6 +1613,8 @@ const ( func (u *InviteeInfo) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // Email : E-mail address of invited user. + Email string `json:"email,omitempty"` } var w wrap var err error @@ -1600,7 +1624,7 @@ func (u *InviteeInfo) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "email": - err = json.Unmarshal(body, &u.Email) + u.Email = w.Email if err != nil { return err @@ -1657,13 +1681,13 @@ func (u *JobError) UnmarshalJSON(body []byte) error { dropbox.Tagged // UnshareFolderError : Error occurred while performing `unshareFolder` // action. - UnshareFolderError json.RawMessage `json:"unshare_folder_error,omitempty"` + UnshareFolderError *UnshareFolderError `json:"unshare_folder_error,omitempty"` // RemoveFolderMemberError : Error occurred while performing // `removeFolderMember` action. - RemoveFolderMemberError json.RawMessage `json:"remove_folder_member_error,omitempty"` + RemoveFolderMemberError *RemoveFolderMemberError `json:"remove_folder_member_error,omitempty"` // RelinquishFolderMembershipError : Error occurred while performing // `relinquishFolderMembership` action. - RelinquishFolderMembershipError json.RawMessage `json:"relinquish_folder_membership_error,omitempty"` + RelinquishFolderMembershipError *RelinquishFolderMembershipError `json:"relinquish_folder_membership_error,omitempty"` } var w wrap var err error @@ -1673,19 +1697,19 @@ func (u *JobError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "unshare_folder_error": - err = json.Unmarshal(w.UnshareFolderError, &u.UnshareFolderError) + u.UnshareFolderError = w.UnshareFolderError if err != nil { return err } case "remove_folder_member_error": - err = json.Unmarshal(w.RemoveFolderMemberError, &u.RemoveFolderMemberError) + u.RemoveFolderMemberError = w.RemoveFolderMemberError if err != nil { return err } case "relinquish_folder_membership_error": - err = json.Unmarshal(w.RelinquishFolderMembershipError, &u.RelinquishFolderMembershipError) + u.RelinquishFolderMembershipError = w.RelinquishFolderMembershipError if err != nil { return err @@ -1713,7 +1737,7 @@ func (u *JobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Failed : The asynchronous job returned an error. 
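JobStatus below, like SaveUrlJobStatus and ShareFolderJobStatus elsewhere in this diff, is the polling half of the Dropbox async-job convention: a launch union either completes inline or answers with an async_job_id, which the caller feeds to a check-status endpoint until the status union stops reporting in_progress. A sketch of the consuming side under that convention (checkStatus is a hypothetical stand-in for the SDK's check-job-status calls, and the in_progress tag, while not visible in this hunk, is part of the same convention):

package main

import (
	"errors"
	"fmt"
	"time"
)

// Launch mirrors unions like ShareFolderLaunch: the work either finished
// inline ("complete") or we received an async job id to poll.
type Launch struct {
	Tag        string
	AsyncJobId string
}

// Status mirrors status unions like JobStatus / ShareFolderJobStatus.
type Status struct {
	Tag    string // "in_progress", "complete" or "failed"
	Failed error  // set when Tag == "failed"
}

// checkStatus is a hypothetical poll call; a real client would hit the
// corresponding check-job-status endpoint with the job id.
func checkStatus(jobID string) (Status, error) {
	return Status{Tag: "complete"}, nil
}

func wait(l Launch) error {
	if l.Tag == "complete" {
		return nil
	}
	for {
		st, err := checkStatus(l.AsyncJobId)
		if err != nil {
			return err
		}
		switch st.Tag {
		case "in_progress":
			time.Sleep(time.Second) // back off before polling again
		case "complete":
			return nil
		case "failed":
			return st.Failed
		default:
			return errors.New("unexpected job status tag: " + st.Tag)
		}
	}
}

func main() {
	fmt.Println(wait(Launch{Tag: "async_job_id", AsyncJobId: "job-123"}))
}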
- Failed json.RawMessage `json:"failed,omitempty"` + Failed *JobError `json:"failed,omitempty"` } var w wrap var err error @@ -1723,7 +1747,7 @@ func (u *JobStatus) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -1732,6 +1756,18 @@ func (u *JobStatus) UnmarshalJSON(body []byte) error { return nil } +// LinkAccessLevel : has no documentation (yet) +type LinkAccessLevel struct { + dropbox.Tagged +} + +// Valid tag values for LinkAccessLevel +const ( + LinkAccessLevelViewer = "viewer" + LinkAccessLevelEditor = "editor" + LinkAccessLevelOther = "other" +) + // LinkAction : Actions that can be performed on a link. type LinkAction struct { dropbox.Tagged @@ -1755,11 +1791,12 @@ type LinkAudience struct { // Valid tag values for LinkAudience const ( - LinkAudiencePublic = "public" - LinkAudienceTeam = "team" - LinkAudienceNoOne = "no_one" - LinkAudienceMembers = "members" - LinkAudienceOther = "other" + LinkAudiencePublic = "public" + LinkAudienceTeam = "team" + LinkAudienceNoOne = "no_one" + LinkAudiencePassword = "password" + LinkAudienceMembers = "members" + LinkAudienceOther = "other" ) // LinkExpiry : has no documentation (yet) @@ -1780,6 +1817,8 @@ const ( func (u *LinkExpiry) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // SetExpiry : Set a new expiry or change an existing expiry. + SetExpiry time.Time `json:"set_expiry,omitempty"` } var w wrap var err error @@ -1789,7 +1828,7 @@ func (u *LinkExpiry) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "set_expiry": - err = json.Unmarshal(body, &u.SetExpiry) + u.SetExpiry = w.SetExpiry if err != nil { return err @@ -1816,6 +1855,8 @@ const ( func (u *LinkPassword) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // SetPassword : Set a new password or change an existing password. + SetPassword string `json:"set_password,omitempty"` } var w wrap var err error @@ -1825,7 +1866,7 @@ func (u *LinkPassword) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "set_password": - err = json.Unmarshal(body, &u.SetPassword) + u.SetPassword = w.SetPassword if err != nil { return err @@ -1858,18 +1899,32 @@ type LinkPermissions struct { // the shared links policies of the team (in case the link's owner is // part of a team) and the shared folder (in case the linked file is part of // a shared folder). This field is shown only if the caller has access to - // this info (the link's owner always has access to this data). + // this info (the link's owner always has access to this data). For some + // links, an effective_audience value is returned instead. ResolvedVisibility *ResolvedVisibility `json:"resolved_visibility,omitempty"` // RequestedVisibility : The shared link's requested visibility. This can be // overridden by the team and shared folder policies. The final visibility, // after considering these policies, can be found in `resolved_visibility`. - // This is shown only if the caller is the link's owner. + // This is shown only if the caller is the link's owner and + // resolved_visibility is returned instead of effective_audience. RequestedVisibility *RequestedVisibility `json:"requested_visibility,omitempty"` // CanRevoke : Whether the caller can revoke the shared link. CanRevoke bool `json:"can_revoke"` // RevokeFailureReason : The failure reason for revoking the link. This // field will only be present if the `can_revoke` is false.
RevokeFailureReason *SharedLinkAccessFailureReason `json:"revoke_failure_reason,omitempty"` + // EffectiveAudience : The type of audience who can benefit from the access + // level specified by the `link_access_level` field. + EffectiveAudience *LinkAudience `json:"effective_audience,omitempty"` + // LinkAccessLevel : The access level that the link will grant to its users. + // A link can grant additional rights to a user beyond their current access + // level. For example, if a user was invited as a viewer to a file, and then + // opens a link with `link_access_level` set to `editor`, then they will + // gain editor privileges. The `link_access_level` is a property of the + // link, and does not depend on who is calling this API. In particular, + // `link_access_level` does not take into account the API caller's current + // permissions to the content. + LinkAccessLevel *LinkAccessLevel `json:"link_access_level,omitempty"` } // NewLinkPermissions returns a new LinkPermissions instance @@ -1990,9 +2045,9 @@ func (u *ListFileMembersContinueError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2002,13 +2057,13 @@ func (u *ListFileMembersContinueError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2055,9 +2110,9 @@ func (u *ListFileMembersError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2067,13 +2122,13 @@ func (u *ListFileMembersError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2102,11 +2157,9 @@ const ( func (u *ListFileMembersIndividualResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Result : The results of the query for this file if it was successful. - Result json.RawMessage `json:"result,omitempty"` // AccessError : The result of the query for this file if it was an // error. 
- AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2122,7 +2175,7 @@ func (u *ListFileMembersIndividualResult) UnmarshalJSON(body []byte) error { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2181,7 +2234,7 @@ func (u *ListFilesContinueError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : User account had a problem. - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` } var w wrap var err error @@ -2191,7 +2244,7 @@ func (u *ListFilesContinueError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err @@ -2281,7 +2334,7 @@ func (u *ListFolderMembersContinueError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2291,7 +2344,7 @@ func (u *ListFolderMembersContinueError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2399,7 +2452,7 @@ func (u *ListSharedLinksError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Path : has no documentation (yet) - Path json.RawMessage `json:"path,omitempty"` + Path *files.LookupError `json:"path,omitempty"` } var w wrap var err error @@ -2409,7 +2462,7 @@ func (u *ListSharedLinksError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "path": - err = json.Unmarshal(w.Path, &u.Path) + u.Path = w.Path if err != nil { return err @@ -2559,6 +2612,10 @@ const ( func (u *MemberSelector) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // DropboxId : Dropbox account, team member, or group ID of member. + DropboxId string `json:"dropbox_id,omitempty"` + // Email : E-mail address of member. + Email string `json:"email,omitempty"` } var w wrap var err error @@ -2568,13 +2625,13 @@ func (u *MemberSelector) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "dropbox_id": - err = json.Unmarshal(body, &u.DropboxId) + u.DropboxId = w.DropboxId if err != nil { return err } case "email": - err = json.Unmarshal(body, &u.Email) + u.Email = w.Email if err != nil { return err @@ -2625,7 +2682,7 @@ func (u *ModifySharedLinkSettingsError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // SettingsError : There is an error with the given settings. 
- SettingsError json.RawMessage `json:"settings_error,omitempty"` + SettingsError *SharedLinkSettingsError `json:"settings_error,omitempty"` } var w wrap var err error @@ -2635,7 +2692,7 @@ func (u *ModifySharedLinkSettingsError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "settings_error": - err = json.Unmarshal(w.SettingsError, &u.SettingsError) + u.SettingsError = w.SettingsError if err != nil { return err @@ -2683,10 +2740,7 @@ func (u *MountFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` - // InsufficientQuota : The current user does not have enough space to - // mount the shared folder. - InsufficientQuota json.RawMessage `json:"insufficient_quota,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2696,7 +2750,7 @@ func (u *MountFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2794,8 +2848,6 @@ const ( func (u *PermissionDeniedReason) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // InsufficientPlan : has no documentation (yet) - InsufficientPlan json.RawMessage `json:"insufficient_plan,omitempty"` } var w wrap var err error @@ -2847,7 +2899,7 @@ func (u *RelinquishFileMembershipError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2857,7 +2909,7 @@ func (u *RelinquishFileMembershipError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2907,7 +2959,7 @@ func (u *RelinquishFolderMembershipError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2917,7 +2969,7 @@ func (u *RelinquishFolderMembershipError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -2970,13 +3022,9 @@ func (u *RemoveFileMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` - // NoExplicitAccess : This member does not have explicit access to the - // file and therefore cannot be removed. The return value is the access - // that a user might have to the file from a parent folder. 
- NoExplicitAccess json.RawMessage `json:"no_explicit_access,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -2986,13 +3034,13 @@ func (u *RemoveFileMemberError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -3055,9 +3103,9 @@ func (u *RemoveFolderMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` // MemberError : has no documentation (yet) - MemberError json.RawMessage `json:"member_error,omitempty"` + MemberError *SharedFolderMemberError `json:"member_error,omitempty"` } var w wrap var err error @@ -3067,13 +3115,13 @@ func (u *RemoveFolderMemberError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "member_error": - err = json.Unmarshal(w.MemberError, &u.MemberError) + u.MemberError = w.MemberError if err != nil { return err @@ -3103,11 +3151,8 @@ const ( func (u *RemoveMemberJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : Removing the folder member has finished. The value is - // information about whether the member has another form of access. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : has no documentation (yet) - Failed json.RawMessage `json:"failed,omitempty"` + Failed *RemoveFolderMemberError `json:"failed,omitempty"` } var w wrap var err error @@ -3123,7 +3168,7 @@ func (u *RemoveMemberJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -3132,6 +3177,19 @@ func (u *RemoveMemberJobStatus) UnmarshalJSON(body []byte) error { return nil } +// RequestedLinkAccessLevel : has no documentation (yet) +type RequestedLinkAccessLevel struct { + dropbox.Tagged +} + +// Valid tag values for RequestedLinkAccessLevel +const ( + RequestedLinkAccessLevelViewer = "viewer" + RequestedLinkAccessLevelEditor = "editor" + RequestedLinkAccessLevelMax = "max" + RequestedLinkAccessLevelOther = "other" +) + // RequestedVisibility : The access permission that can be requested by the // caller for the shared link. Note that the final resolved visibility of the // shared link takes into account other aspects, such as team and shared folder @@ -3228,7 +3286,7 @@ func (u *SetAccessInheritanceError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : Unable to access shared folder. 
- AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -3238,7 +3296,7 @@ func (u *SetAccessInheritanceError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -3321,7 +3379,7 @@ func (u *ShareFolderErrorBase) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // BadPath : `ShareFolderArg.path` is invalid. - BadPath json.RawMessage `json:"bad_path,omitempty"` + BadPath *SharePathError `json:"bad_path,omitempty"` } var w wrap var err error @@ -3331,7 +3389,7 @@ func (u *ShareFolderErrorBase) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "bad_path": - err = json.Unmarshal(w.BadPath, &u.BadPath) + u.BadPath = w.BadPath if err != nil { return err @@ -3362,7 +3420,7 @@ func (u *ShareFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // BadPath : `ShareFolderArg.path` is invalid. - BadPath json.RawMessage `json:"bad_path,omitempty"` + BadPath *SharePathError `json:"bad_path,omitempty"` } var w wrap var err error @@ -3372,7 +3430,7 @@ func (u *ShareFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "bad_path": - err = json.Unmarshal(w.BadPath, &u.BadPath) + u.BadPath = w.BadPath if err != nil { return err @@ -3402,11 +3460,8 @@ const ( func (u *ShareFolderJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The share job has finished. The value is the metadata for - // the folder. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : has no documentation (yet) - Failed json.RawMessage `json:"failed,omitempty"` + Failed *ShareFolderError `json:"failed,omitempty"` } var w wrap var err error @@ -3422,7 +3477,7 @@ func (u *ShareFolderJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -3452,8 +3507,10 @@ const ( func (u *ShareFolderLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -3463,7 +3520,7 @@ func (u *ShareFolderLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -3508,9 +3565,6 @@ const ( func (u *SharePathError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // AlreadyShared : Folder is already shared. Contains metadata about the - // existing shared folder. - AlreadyShared json.RawMessage `json:"already_shared,omitempty"` } var w wrap var err error @@ -3670,9 +3724,6 @@ const ( func (u *SharedFolderMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // NoExplicitAccess : The target member only has inherited access to the - // shared folder. 
- NoExplicitAccess json.RawMessage `json:"no_explicit_access,omitempty"` } var w wrap var err error @@ -3806,6 +3857,43 @@ const ( SharedLinkAccessFailureReasonOther = "other" ) +// SharedLinkAlreadyExistsMetadata : has no documentation (yet) +type SharedLinkAlreadyExistsMetadata struct { + dropbox.Tagged + // Metadata : Metadata of the shared link that already exists. + Metadata IsSharedLinkMetadata `json:"metadata,omitempty"` +} + +// Valid tag values for SharedLinkAlreadyExistsMetadata +const ( + SharedLinkAlreadyExistsMetadataMetadata = "metadata" + SharedLinkAlreadyExistsMetadataOther = "other" +) + +// UnmarshalJSON deserializes into a SharedLinkAlreadyExistsMetadata instance +func (u *SharedLinkAlreadyExistsMetadata) UnmarshalJSON(body []byte) error { + type wrap struct { + dropbox.Tagged + // Metadata : Metadata of the shared link that already exists. + Metadata json.RawMessage `json:"metadata,omitempty"` + } + var w wrap + var err error + if err = json.Unmarshal(body, &w); err != nil { + return err + } + u.Tag = w.Tag + switch u.Tag { + case "metadata": + u.Metadata, err = IsSharedLinkMetadataFromJSON(w.Metadata) + + if err != nil { + return err + } + } + return nil +} + // SharedLinkPolicy : Who can view shared links in this folder. type SharedLinkPolicy struct { dropbox.Tagged @@ -3830,6 +3918,15 @@ type SharedLinkSettings struct { // Expires : Expiration time of the shared link. By default the link won't // expire. Expires time.Time `json:"expires,omitempty"` + // Audience : The new audience who can benefit from the access level + // specified in the `link_access_level` + // field of `LinkPermissions`. This is used in conjunction with team + // policies and shared folder policies to determine the final effective + // audience type in the `effective_audience` field of `LinkPermissions`. + Audience *LinkAudience `json:"audience,omitempty"` + // Access : Requested access level you want the audience to gain from this + // link.
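Together with the Access field declared just below, SharedLinkSettings can now request both who a link is for and what it grants, and create_shared_link_with_settings can answer shared_link_already_exists with the existing link's metadata attached, so a caller may recover the link without a follow-up listSharedLinks call. On the wire the two new settings are ordinary unions; a sketch of the resulting request JSON, assuming the usual ".tag" object encoding of dropbox.Tagged and using local stand-ins for the SDK types:

package main

import (
	"encoding/json"
	"fmt"
)

// tagged is how a bare union variant travels on the wire: an object
// whose only member is the ".tag" discriminator.
type tagged struct {
	Tag string `json:".tag"`
}

// settings mirrors the new SharedLinkSettings fields declared above.
type settings struct {
	Audience *tagged `json:"audience,omitempty"`
	Access   *tagged `json:"access,omitempty"`
}

func main() {
	b, _ := json.Marshal(settings{
		Audience: &tagged{Tag: "team"},   // LinkAudienceTeam
		Access:   &tagged{Tag: "viewer"}, // RequestedLinkAccessLevelViewer
	})
	fmt.Println(string(b))
	// Output: {"audience":{".tag":"team"},"access":{".tag":"viewer"}}
}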
+ Access *RequestedLinkAccessLevel `json:"access,omitempty"` } // NewSharedLinkSettings returns a new SharedLinkSettings instance @@ -3934,7 +4031,7 @@ func (u *TransferFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -3944,7 +4041,7 @@ func (u *TransferFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -3986,7 +4083,7 @@ func (u *UnmountFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -3996,7 +4093,7 @@ func (u *UnmountFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -4039,9 +4136,9 @@ func (u *UnshareFileError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UserError : has no documentation (yet) - UserError json.RawMessage `json:"user_error,omitempty"` + UserError *SharingUserError `json:"user_error,omitempty"` // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharingFileAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -4051,13 +4148,13 @@ func (u *UnshareFileError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "user_error": - err = json.Unmarshal(w.UserError, &u.UserError) + u.UserError = w.UserError if err != nil { return err } case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -4106,7 +4203,7 @@ func (u *UnshareFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -4116,7 +4213,7 @@ func (u *UnshareFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err @@ -4187,13 +4284,13 @@ func (u *UpdateFolderMemberError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` // MemberError : has no documentation (yet) - MemberError json.RawMessage `json:"member_error,omitempty"` + MemberError *SharedFolderMemberError `json:"member_error,omitempty"` // NoExplicitAccess : If updating the access type required the member to // be added to the shared folder and there was an error when adding the // member. 
- NoExplicitAccess json.RawMessage `json:"no_explicit_access,omitempty"` + NoExplicitAccess *AddFolderMemberError `json:"no_explicit_access,omitempty"` } var w wrap var err error @@ -4203,19 +4300,19 @@ func (u *UpdateFolderMemberError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "member_error": - err = json.Unmarshal(w.MemberError, &u.MemberError) + u.MemberError = w.MemberError if err != nil { return err } case "no_explicit_access": - err = json.Unmarshal(w.NoExplicitAccess, &u.NoExplicitAccess) + u.NoExplicitAccess = w.NoExplicitAccess if err != nil { return err @@ -4280,7 +4377,7 @@ func (u *UpdateFolderPolicyError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *SharedFolderAccessError `json:"access_error,omitempty"` } var w wrap var err error @@ -4290,7 +4387,7 @@ func (u *UpdateFolderPolicyError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/types.go index d8f65fad2..469999ddd 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team/types.go @@ -153,11 +153,11 @@ func (u *BaseTeamFolderError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` } var w wrap var err error @@ -167,19 +167,19 @@ func (u *BaseTeamFolderError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err @@ -219,10 +219,8 @@ const ( func (u *CustomQuotaResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : User's custom quota. - Success json.RawMessage `json:"success,omitempty"` // InvalidUser : Invalid user (not in team). 
- InvalidUser json.RawMessage `json:"invalid_user,omitempty"` + InvalidUser *UserSelectorArg `json:"invalid_user,omitempty"` } var w wrap var err error @@ -238,7 +236,7 @@ func (u *CustomQuotaResult) UnmarshalJSON(body []byte) error { return err } case "invalid_user": - err = json.Unmarshal(w.InvalidUser, &u.InvalidUser) + u.InvalidUser = w.InvalidUser if err != nil { return err @@ -540,13 +538,13 @@ func (u *FeatureValue) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // UploadApiRateLimit : has no documentation (yet) - UploadApiRateLimit json.RawMessage `json:"upload_api_rate_limit,omitempty"` + UploadApiRateLimit *UploadApiRateLimitValue `json:"upload_api_rate_limit,omitempty"` // HasTeamSharedDropbox : has no documentation (yet) - HasTeamSharedDropbox json.RawMessage `json:"has_team_shared_dropbox,omitempty"` + HasTeamSharedDropbox *HasTeamSharedDropboxValue `json:"has_team_shared_dropbox,omitempty"` // HasTeamFileEvents : has no documentation (yet) - HasTeamFileEvents json.RawMessage `json:"has_team_file_events,omitempty"` + HasTeamFileEvents *HasTeamFileEventsValue `json:"has_team_file_events,omitempty"` // HasTeamSelectiveSync : has no documentation (yet) - HasTeamSelectiveSync json.RawMessage `json:"has_team_selective_sync,omitempty"` + HasTeamSelectiveSync *HasTeamSelectiveSyncValue `json:"has_team_selective_sync,omitempty"` } var w wrap var err error @@ -556,25 +554,25 @@ func (u *FeatureValue) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "upload_api_rate_limit": - err = json.Unmarshal(w.UploadApiRateLimit, &u.UploadApiRateLimit) + u.UploadApiRateLimit = w.UploadApiRateLimit if err != nil { return err } case "has_team_shared_dropbox": - err = json.Unmarshal(w.HasTeamSharedDropbox, &u.HasTeamSharedDropbox) + u.HasTeamSharedDropbox = w.HasTeamSharedDropbox if err != nil { return err } case "has_team_file_events": - err = json.Unmarshal(w.HasTeamFileEvents, &u.HasTeamFileEvents) + u.HasTeamFileEvents = w.HasTeamFileEvents if err != nil { return err } case "has_team_selective_sync": - err = json.Unmarshal(w.HasTeamSelectiveSync, &u.HasTeamSelectiveSync) + u.HasTeamSelectiveSync = w.HasTeamSelectiveSync if err != nil { return err @@ -1010,12 +1008,12 @@ func (u *GroupMembersAddError) UnmarshalJSON(body []byte) error { // Currently, you cannot add members to a group if they are not part of // your team, though this may change in a subsequent version. To add new // members to your Dropbox Business team, use the `membersAdd` endpoint. - MembersNotInTeam json.RawMessage `json:"members_not_in_team,omitempty"` + MembersNotInTeam []string `json:"members_not_in_team,omitempty"` // UsersNotFound : These users were not found in Dropbox. - UsersNotFound json.RawMessage `json:"users_not_found,omitempty"` + UsersNotFound []string `json:"users_not_found,omitempty"` // UserCannotBeManagerOfCompanyManagedGroup : A company-managed group // cannot be managed by a user. 
- UserCannotBeManagerOfCompanyManagedGroup json.RawMessage `json:"user_cannot_be_manager_of_company_managed_group,omitempty"` + UserCannotBeManagerOfCompanyManagedGroup []string `json:"user_cannot_be_manager_of_company_managed_group,omitempty"` } var w wrap var err error @@ -1025,19 +1023,19 @@ func (u *GroupMembersAddError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "members_not_in_team": - err = json.Unmarshal(body, &u.MembersNotInTeam) + u.MembersNotInTeam = w.MembersNotInTeam if err != nil { return err } case "users_not_found": - err = json.Unmarshal(body, &u.UsersNotFound) + u.UsersNotFound = w.UsersNotFound if err != nil { return err } case "user_cannot_be_manager_of_company_managed_group": - err = json.Unmarshal(body, &u.UserCannotBeManagerOfCompanyManagedGroup) + u.UserCannotBeManagerOfCompanyManagedGroup = w.UserCannotBeManagerOfCompanyManagedGroup if err != nil { return err @@ -1123,9 +1121,9 @@ func (u *GroupMembersRemoveError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // MembersNotInTeam : These members are not part of your team. - MembersNotInTeam json.RawMessage `json:"members_not_in_team,omitempty"` + MembersNotInTeam []string `json:"members_not_in_team,omitempty"` // UsersNotFound : These users were not found in Dropbox. - UsersNotFound json.RawMessage `json:"users_not_found,omitempty"` + UsersNotFound []string `json:"users_not_found,omitempty"` } var w wrap var err error @@ -1135,13 +1133,13 @@ func (u *GroupMembersRemoveError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "members_not_in_team": - err = json.Unmarshal(body, &u.MembersNotInTeam) + u.MembersNotInTeam = w.MembersNotInTeam if err != nil { return err } case "users_not_found": - err = json.Unmarshal(body, &u.UsersNotFound) + u.UsersNotFound = w.UsersNotFound if err != nil { return err @@ -1207,6 +1205,10 @@ const ( func (u *GroupSelector) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // GroupId : Group ID. + GroupId string `json:"group_id,omitempty"` + // GroupExternalId : External ID of the group. + GroupExternalId string `json:"group_external_id,omitempty"` } var w wrap var err error @@ -1216,13 +1218,13 @@ func (u *GroupSelector) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "group_id": - err = json.Unmarshal(body, &u.GroupId) + u.GroupId = w.GroupId if err != nil { return err } case "group_external_id": - err = json.Unmarshal(body, &u.GroupExternalId) + u.GroupExternalId = w.GroupExternalId if err != nil { return err @@ -1301,8 +1303,11 @@ const ( func (u *GroupsGetInfoItem) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // GroupInfo : Info about a group. - GroupInfo json.RawMessage `json:"group_info,omitempty"` + // IdNotFound : An ID that was provided as a parameter to + // `groupsGetInfo`, and did not match a corresponding group. The ID can + // be a group ID, or an external ID, depending on how the method was + // called. + IdNotFound string `json:"id_not_found,omitempty"` } var w wrap var err error @@ -1312,7 +1317,7 @@ func (u *GroupsGetInfoItem) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "id_not_found": - err = json.Unmarshal(body, &u.IdNotFound) + u.IdNotFound = w.IdNotFound if err != nil { return err @@ -1482,9 +1487,9 @@ func (u *GroupsSelector) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // GroupIds : List of group IDs. 
- GroupIds json.RawMessage `json:"group_ids,omitempty"` + GroupIds []string `json:"group_ids,omitempty"` // GroupExternalIds : List of external IDs of groups. - GroupExternalIds json.RawMessage `json:"group_external_ids,omitempty"` + GroupExternalIds []string `json:"group_external_ids,omitempty"` } var w wrap var err error @@ -1494,13 +1499,13 @@ func (u *GroupsSelector) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "group_ids": - err = json.Unmarshal(body, &u.GroupIds) + u.GroupIds = w.GroupIds if err != nil { return err } case "group_external_ids": - err = json.Unmarshal(body, &u.GroupExternalIds) + u.GroupExternalIds = w.GroupExternalIds if err != nil { return err @@ -1526,6 +1531,8 @@ const ( func (u *HasTeamFileEventsValue) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // Enabled : Does this team have file events. + Enabled bool `json:"enabled,omitempty"` } var w wrap var err error @@ -1535,7 +1542,7 @@ func (u *HasTeamFileEventsValue) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "enabled": - err = json.Unmarshal(body, &u.Enabled) + u.Enabled = w.Enabled if err != nil { return err @@ -1561,6 +1568,9 @@ const ( func (u *HasTeamSelectiveSyncValue) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // HasTeamSelectiveSync : Does this team have team selective sync + // enabled. + HasTeamSelectiveSync bool `json:"has_team_selective_sync,omitempty"` } var w wrap var err error @@ -1570,7 +1580,7 @@ func (u *HasTeamSelectiveSyncValue) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "has_team_selective_sync": - err = json.Unmarshal(body, &u.HasTeamSelectiveSync) + u.HasTeamSelectiveSync = w.HasTeamSelectiveSync if err != nil { return err @@ -1596,6 +1606,8 @@ const ( func (u *HasTeamSharedDropboxValue) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // HasTeamSharedDropbox : Does this team have a shared team root. + HasTeamSharedDropbox bool `json:"has_team_shared_dropbox,omitempty"` } var w wrap var err error @@ -1605,7 +1617,7 @@ func (u *HasTeamSharedDropboxValue) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "has_team_shared_dropbox": - err = json.Unmarshal(body, &u.HasTeamSharedDropbox) + u.HasTeamSharedDropbox = w.HasTeamSharedDropbox if err != nil { return err @@ -2019,8 +2031,36 @@ const ( func (u *MemberAddResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Success : Describes a user that was successfully added to the team. - Success json.RawMessage `json:"success,omitempty"` + // TeamLicenseLimit : Team is already full. The organization has no + // available licenses. + TeamLicenseLimit string `json:"team_license_limit,omitempty"` + // FreeTeamMemberLimitReached : Team is already full. The free team + // member limit has been reached. + FreeTeamMemberLimitReached string `json:"free_team_member_limit_reached,omitempty"` + // UserAlreadyOnTeam : User is already on this team. The provided email + // address is associated with a user who is already a member of + // (including in recoverable state) or invited to the team. + UserAlreadyOnTeam string `json:"user_already_on_team,omitempty"` + // UserOnAnotherTeam : User is already on another team. The provided + // email address is associated with a user that is already a member or + // invited to another team. + UserOnAnotherTeam string `json:"user_on_another_team,omitempty"` + // UserAlreadyPaired : User is already paired. 
+ UserAlreadyPaired string `json:"user_already_paired,omitempty"` + // UserMigrationFailed : User migration has failed. + UserMigrationFailed string `json:"user_migration_failed,omitempty"` + // DuplicateExternalMemberId : A user with the given external member ID + // already exists on the team (including in recoverable state). + DuplicateExternalMemberId string `json:"duplicate_external_member_id,omitempty"` + // DuplicateMemberPersistentId : A user with the given persistent ID + // already exists on the team (including in recoverable state). + DuplicateMemberPersistentId string `json:"duplicate_member_persistent_id,omitempty"` + // PersistentIdDisabled : Persistent ID is only available to teams with + // persistent ID SAML configuration. Please contact Dropbox for more + // information. + PersistentIdDisabled string `json:"persistent_id_disabled,omitempty"` + // UserCreationFailed : User creation has failed. + UserCreationFailed string `json:"user_creation_failed,omitempty"` } var w wrap var err error @@ -2036,61 +2076,61 @@ func (u *MemberAddResult) UnmarshalJSON(body []byte) error { return err } case "team_license_limit": - err = json.Unmarshal(body, &u.TeamLicenseLimit) + u.TeamLicenseLimit = w.TeamLicenseLimit if err != nil { return err } case "free_team_member_limit_reached": - err = json.Unmarshal(body, &u.FreeTeamMemberLimitReached) + u.FreeTeamMemberLimitReached = w.FreeTeamMemberLimitReached if err != nil { return err } case "user_already_on_team": - err = json.Unmarshal(body, &u.UserAlreadyOnTeam) + u.UserAlreadyOnTeam = w.UserAlreadyOnTeam if err != nil { return err } case "user_on_another_team": - err = json.Unmarshal(body, &u.UserOnAnotherTeam) + u.UserOnAnotherTeam = w.UserOnAnotherTeam if err != nil { return err } case "user_already_paired": - err = json.Unmarshal(body, &u.UserAlreadyPaired) + u.UserAlreadyPaired = w.UserAlreadyPaired if err != nil { return err } case "user_migration_failed": - err = json.Unmarshal(body, &u.UserMigrationFailed) + u.UserMigrationFailed = w.UserMigrationFailed if err != nil { return err } case "duplicate_external_member_id": - err = json.Unmarshal(body, &u.DuplicateExternalMemberId) + u.DuplicateExternalMemberId = w.DuplicateExternalMemberId if err != nil { return err } case "duplicate_member_persistent_id": - err = json.Unmarshal(body, &u.DuplicateMemberPersistentId) + u.DuplicateMemberPersistentId = w.DuplicateMemberPersistentId if err != nil { return err } case "persistent_id_disabled": - err = json.Unmarshal(body, &u.PersistentIdDisabled) + u.PersistentIdDisabled = w.PersistentIdDisabled if err != nil { return err } case "user_creation_failed": - err = json.Unmarshal(body, &u.UserCreationFailed) + u.UserCreationFailed = w.UserCreationFailed if err != nil { return err @@ -2160,11 +2200,17 @@ type MemberProfile struct { // JoinedOn : The date and time the user joined as a member of a specific // team. JoinedOn time.Time `json:"joined_on,omitempty"` + // SuspendedOn : The date and time the user was suspended from the team + // (contains value only when the member's status matches + // `TeamMemberStatus.suspended`). + SuspendedOn time.Time `json:"suspended_on,omitempty"` // PersistentId : Persistent ID that a team can attach to the user. The // persistent ID is unique ID to be used for SAML authentication. PersistentId string `json:"persistent_id,omitempty"` // IsDirectoryRestricted : Whether the user is a directory restricted user.
IsDirectoryRestricted bool `json:"is_directory_restricted,omitempty"` + // ProfilePhotoUrl : URL for the photo representing the user, if one is set. + ProfilePhotoUrl string `json:"profile_photo_url,omitempty"` } // NewMemberProfile returns a new MemberProfile instance @@ -2243,7 +2289,10 @@ func (u *MembersAddJobStatus) UnmarshalJSON(body []byte) error { // Complete : The asynchronous job has finished. For each member that // was specified in the parameter `MembersAddArg` that was provided to // `membersAdd`, a corresponding item is returned in this list. - Complete []json.RawMessage `json:"complete,omitempty"` + Complete []*MemberAddResult `json:"complete,omitempty"` + // Failed : The asynchronous job returned an error. The string contains + // an error message. + Failed string `json:"failed,omitempty"` } var w wrap var err error @@ -2253,13 +2302,13 @@ func (u *MembersAddJobStatus) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "complete": - err = json.Unmarshal(body, &u.Complete) + u.Complete = w.Complete if err != nil { return err } case "failed": - err = json.Unmarshal(body, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -2289,8 +2338,12 @@ const ( func (u *MembersAddLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` // Complete : has no documentation (yet) - Complete []json.RawMessage `json:"complete,omitempty"` + Complete []*MemberAddResult `json:"complete,omitempty"` } var w wrap var err error @@ -2300,13 +2353,13 @@ func (u *MembersAddLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err } case "complete": - err = json.Unmarshal(body, &u.Complete) + u.Complete = w.Complete if err != nil { return err @@ -2422,8 +2475,11 @@ const ( func (u *MembersGetInfoItem) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // MemberInfo : Info about a team member. - MemberInfo json.RawMessage `json:"member_info,omitempty"` + // IdNotFound : An ID that was provided as a parameter to + // `membersGetInfo`, and did not match a corresponding user. This might + // be a team_member_id, an email, or an external ID, depending on how + // the method was called. + IdNotFound string `json:"id_not_found,omitempty"` } var w wrap var err error @@ -2433,7 +2489,7 @@ func (u *MembersGetInfoItem) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "id_not_found": - err = json.Unmarshal(body, &u.IdNotFound) + u.IdNotFound = w.IdNotFound if err != nil { return err @@ -2896,9 +2952,9 @@ func (u *RemoveCustomQuotaResult) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // Success : Successfully removed user. - Success json.RawMessage `json:"success,omitempty"` + Success *UserSelectorArg `json:"success,omitempty"` // InvalidUser : Invalid user (not in team). 
- InvalidUser json.RawMessage `json:"invalid_user,omitempty"` + InvalidUser *UserSelectorArg `json:"invalid_user,omitempty"` } var w wrap var err error @@ -2908,13 +2964,13 @@ func (u *RemoveCustomQuotaResult) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "success": - err = json.Unmarshal(w.Success, &u.Success) + u.Success = w.Success if err != nil { return err } case "invalid_user": - err = json.Unmarshal(w.InvalidUser, &u.InvalidUser) + u.InvalidUser = w.InvalidUser if err != nil { return err @@ -2980,12 +3036,6 @@ const ( func (u *RevokeDeviceSessionArg) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // WebSession : End an active session. - WebSession json.RawMessage `json:"web_session,omitempty"` - // DesktopClient : Unlink a linked desktop device. - DesktopClient json.RawMessage `json:"desktop_client,omitempty"` - // MobileClient : Unlink a linked mobile device. - MobileClient json.RawMessage `json:"mobile_client,omitempty"` } var w wrap var err error @@ -3242,11 +3292,11 @@ func (u *TeamFolderActivateError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` } var w wrap var err error @@ -3256,19 +3306,19 @@ func (u *TeamFolderActivateError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err @@ -3329,11 +3379,11 @@ func (u *TeamFolderArchiveError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` } var w wrap var err error @@ -3343,19 +3393,19 @@ func (u *TeamFolderArchiveError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = 
json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err @@ -3386,12 +3436,9 @@ const ( func (u *TeamFolderArchiveJobStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : The archive job has finished. The value is the metadata - // for the resulting team folder. - Complete json.RawMessage `json:"complete,omitempty"` // Failed : Error occurred while performing an asynchronous job from // `teamFolderArchive`. - Failed json.RawMessage `json:"failed,omitempty"` + Failed *TeamFolderArchiveError `json:"failed,omitempty"` } var w wrap var err error @@ -3407,7 +3454,7 @@ func (u *TeamFolderArchiveJobStatus) UnmarshalJSON(body []byte) error { return err } case "failed": - err = json.Unmarshal(w.Failed, &u.Failed) + u.Failed = w.Failed if err != nil { return err @@ -3437,8 +3484,10 @@ const ( func (u *TeamFolderArchiveLaunch) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Complete : has no documentation (yet) - Complete json.RawMessage `json:"complete,omitempty"` + // AsyncJobId : This response indicates that the processing is + // asynchronous. The string is an id that can be used to obtain the + // status of the asynchronous job. + AsyncJobId string `json:"async_job_id,omitempty"` } var w wrap var err error @@ -3448,7 +3497,7 @@ func (u *TeamFolderArchiveLaunch) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "async_job_id": - err = json.Unmarshal(body, &u.AsyncJobId) + u.AsyncJobId = w.AsyncJobId if err != nil { return err @@ -3500,7 +3549,7 @@ func (u *TeamFolderCreateError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // SyncSettingsError : An error occurred setting the sync settings. - SyncSettingsError json.RawMessage `json:"sync_settings_error,omitempty"` + SyncSettingsError *files.SyncSettingsError `json:"sync_settings_error,omitempty"` } var w wrap var err error @@ -3510,7 +3559,7 @@ func (u *TeamFolderCreateError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "sync_settings_error": - err = json.Unmarshal(w.SyncSettingsError, &u.SyncSettingsError) + u.SyncSettingsError = w.SyncSettingsError if err != nil { return err @@ -3539,8 +3588,9 @@ const ( func (u *TeamFolderGetInfoItem) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // TeamFolderMetadata : Properties of a team folder. - TeamFolderMetadata json.RawMessage `json:"team_folder_metadata,omitempty"` + // IdNotFound : An ID that was provided as a parameter to + // `teamFolderGetInfo` did not match any of the team's team folders. 
+ IdNotFound string `json:"id_not_found,omitempty"` } var w wrap var err error @@ -3550,7 +3600,7 @@ func (u *TeamFolderGetInfoItem) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "id_not_found": - err = json.Unmarshal(body, &u.IdNotFound) + u.IdNotFound = w.IdNotFound if err != nil { return err @@ -3717,11 +3767,11 @@ func (u *TeamFolderPermanentlyDeleteError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` } var w wrap var err error @@ -3731,19 +3781,19 @@ func (u *TeamFolderPermanentlyDeleteError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err @@ -3794,11 +3844,11 @@ func (u *TeamFolderRenameError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` } var w wrap var err error @@ -3808,19 +3858,19 @@ func (u *TeamFolderRenameError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err @@ -3898,13 +3948,13 @@ func (u *TeamFolderUpdateSyncSettingsError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // AccessError : has no documentation (yet) - AccessError json.RawMessage `json:"access_error,omitempty"` + AccessError *TeamFolderAccessError `json:"access_error,omitempty"` // StatusError : has no documentation (yet) - StatusError json.RawMessage `json:"status_error,omitempty"` + StatusError *TeamFolderInvalidStatusError `json:"status_error,omitempty"` // TeamSharedDropboxError : has no 
documentation (yet) - TeamSharedDropboxError json.RawMessage `json:"team_shared_dropbox_error,omitempty"` + TeamSharedDropboxError *TeamFolderTeamSharedDropboxError `json:"team_shared_dropbox_error,omitempty"` // SyncSettingsError : An error occurred setting the sync settings. - SyncSettingsError json.RawMessage `json:"sync_settings_error,omitempty"` + SyncSettingsError *files.SyncSettingsError `json:"sync_settings_error,omitempty"` } var w wrap var err error @@ -3914,25 +3964,25 @@ func (u *TeamFolderUpdateSyncSettingsError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "access_error": - err = json.Unmarshal(w.AccessError, &u.AccessError) + u.AccessError = w.AccessError if err != nil { return err } case "status_error": - err = json.Unmarshal(w.StatusError, &u.StatusError) + u.StatusError = w.StatusError if err != nil { return err } case "team_shared_dropbox_error": - err = json.Unmarshal(w.TeamSharedDropboxError, &u.TeamSharedDropboxError) + u.TeamSharedDropboxError = w.TeamSharedDropboxError if err != nil { return err } case "sync_settings_error": - err = json.Unmarshal(w.SyncSettingsError, &u.SyncSettingsError) + u.SyncSettingsError = w.SyncSettingsError if err != nil { return err @@ -4026,9 +4076,6 @@ const ( func (u *TeamMemberStatus) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Removed : User is no longer a member of the team. Removed users are - // only listed when include_removed is true in members/list. - Removed json.RawMessage `json:"removed,omitempty"` } var w wrap var err error @@ -4129,6 +4176,19 @@ func NewTeamNamespacesListResult(Namespaces []*NamespaceMetadata, Cursor string, return s } +// TeamReportFailureReason : has no documentation (yet) +type TeamReportFailureReason struct { + dropbox.Tagged +} + +// Valid tag values for TeamReportFailureReason +const ( + TeamReportFailureReasonTemporaryError = "temporary_error" + TeamReportFailureReasonManyReportsAtOnce = "many_reports_at_once" + TeamReportFailureReasonTooMuchData = "too_much_data" + TeamReportFailureReasonOther = "other" +) + // TokenGetAuthenticatedAdminError : Error returned by // `tokenGetAuthenticatedAdmin`. type TokenGetAuthenticatedAdminError struct { @@ -4173,6 +4233,8 @@ const ( func (u *UploadApiRateLimitValue) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // Limit : The number of upload API calls allowed per month. 
+ Limit uint32 `json:"limit,omitempty"` } var w wrap var err error @@ -4182,7 +4244,7 @@ func (u *UploadApiRateLimitValue) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "limit": - err = json.Unmarshal(body, &u.Limit) + u.Limit = w.Limit if err != nil { return err @@ -4247,6 +4309,12 @@ const ( func (u *UserSelectorArg) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // TeamMemberId : has no documentation (yet) + TeamMemberId string `json:"team_member_id,omitempty"` + // ExternalId : has no documentation (yet) + ExternalId string `json:"external_id,omitempty"` + // Email : has no documentation (yet) + Email string `json:"email,omitempty"` } var w wrap var err error @@ -4256,19 +4324,19 @@ func (u *UserSelectorArg) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "team_member_id": - err = json.Unmarshal(body, &u.TeamMemberId) + u.TeamMemberId = w.TeamMemberId if err != nil { return err } case "external_id": - err = json.Unmarshal(body, &u.ExternalId) + u.ExternalId = w.ExternalId if err != nil { return err } case "email": - err = json.Unmarshal(body, &u.Email) + u.Email = w.Email if err != nil { return err @@ -4301,11 +4369,11 @@ func (u *UsersSelectorArg) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged // TeamMemberIds : List of member IDs. - TeamMemberIds json.RawMessage `json:"team_member_ids,omitempty"` + TeamMemberIds []string `json:"team_member_ids,omitempty"` // ExternalIds : List of external user IDs. - ExternalIds json.RawMessage `json:"external_ids,omitempty"` + ExternalIds []string `json:"external_ids,omitempty"` // Emails : List of email addresses. - Emails json.RawMessage `json:"emails,omitempty"` + Emails []string `json:"emails,omitempty"` } var w wrap var err error @@ -4315,19 +4383,19 @@ func (u *UsersSelectorArg) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "team_member_ids": - err = json.Unmarshal(body, &u.TeamMemberIds) + u.TeamMemberIds = w.TeamMemberIds if err != nil { return err } case "external_ids": - err = json.Unmarshal(body, &u.ExternalIds) + u.ExternalIds = w.ExternalIds if err != nil { return err } case "emails": - err = json.Unmarshal(body, &u.Emails) + u.Emails = w.Emails if err != nil { return err diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_policies/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_policies/types.go index 9c003f437..2facb0c45 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_policies/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_policies/types.go @@ -71,6 +71,18 @@ const ( OfficeAddInPolicyOther = "other" ) +// PaperDefaultFolderPolicy : has no documentation (yet) +type PaperDefaultFolderPolicy struct { + dropbox.Tagged +} + +// Valid tag values for PaperDefaultFolderPolicy +const ( + PaperDefaultFolderPolicyEveryoneInTeam = "everyone_in_team" + PaperDefaultFolderPolicyInviteOnly = "invite_only" + PaperDefaultFolderPolicyOther = "other" +) + // PaperDeploymentPolicy : has no documentation (yet) type PaperDeploymentPolicy struct { dropbox.Tagged @@ -83,6 +95,18 @@ const ( PaperDeploymentPolicyOther = "other" ) +// PaperDesktopPolicy : has no documentation (yet) +type PaperDesktopPolicy struct { + dropbox.Tagged +} + +// Valid tag values for PaperDesktopPolicy +const ( + PaperDesktopPolicyDisabled = "disabled" + PaperDesktopPolicyEnabled = "enabled" + PaperDesktopPolicyOther = "other" +) + // PaperEnabledPolicy : has 
no documentation (yet) type PaperEnabledPolicy struct { dropbox.Tagged @@ -278,3 +302,15 @@ const ( TwoStepVerificationPolicyRequireTfaDisable = "require_tfa_disable" TwoStepVerificationPolicyOther = "other" ) + +// TwoStepVerificationState : has no documentation (yet) +type TwoStepVerificationState struct { + dropbox.Tagged +} + +// Valid tag values for TwoStepVerificationState +const ( + TwoStepVerificationStateRequired = "required" + TwoStepVerificationStateOptional = "optional" + TwoStepVerificationStateOther = "other" +) diff --git a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users/types.go b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users/types.go index b9d7d920f..fd707e423 100644 --- a/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users/types.go +++ b/vendor/github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users/types.go @@ -278,6 +278,9 @@ const ( func (u *GetAccountBatchError) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged + // NoAccount : The value is an account ID specified in + // `GetAccountBatchArg.account_ids` that does not exist. + NoAccount string `json:"no_account,omitempty"` } var w wrap var err error @@ -287,7 +290,7 @@ func (u *GetAccountBatchError) UnmarshalJSON(body []byte) error { u.Tag = w.Tag switch u.Tag { case "no_account": - err = json.Unmarshal(body, &u.NoAccount) + u.NoAccount = w.NoAccount if err != nil { return err @@ -372,11 +375,6 @@ const ( func (u *SpaceAllocation) UnmarshalJSON(body []byte) error { type wrap struct { dropbox.Tagged - // Individual : The user's space allocation applies only to their - // individual account. - Individual json.RawMessage `json:"individual,omitempty"` - // Team : The user shares space with other members of their team. - Team json.RawMessage `json:"team,omitempty"` } var w wrap var err error diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index fdd328bb7..70fbda532 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -393,7 +393,7 @@ func (p *Buffer) Bytes() []byte { return p.buf } // than relying on this API. // // If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and +// by keys in lexicographical order. This is an implementation detail and // subject to change. func (p *Buffer) SetDeterministic(deterministic bool) { p.deterministic = deterministic diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 1aaee725b..d97f9b356 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -456,6 +456,8 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -519,8 +521,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert // mutating this value. 
v = v.Addr() } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } diff --git a/vendor/github.com/mattn/go-ieproxy/go.mod b/vendor/github.com/mattn/go-ieproxy/go.mod new file mode 100644 index 000000000..4090bb8e7 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/go.mod @@ -0,0 +1,9 @@ +module github.com/mattn/go-ieproxy + +go 1.14 + +require ( + golang.org/x/net v0.0.0-20191112182307-2180aed22343 + golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea + golang.org/x/text v0.3.2 // indirect +) diff --git a/vendor/github.com/mattn/go-ieproxy/go.sum b/vendor/github.com/mattn/go-ieproxy/go.sum new file mode 100644 index 000000000..a87acb1ec --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/go.sum @@ -0,0 +1,11 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20191112182307-2180aed22343 h1:00ohfJ4K98s3m6BGUoBd8nyfp4Yl0GoIKvw5abItTjI= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea h1:Mz1TMnfJDRJLk8S8OPCoJYgrsp/Se/2TBre2+vwX128= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go index a3d4c11c4..2bf003602 100644 --- a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go @@ -25,33 +25,62 @@ func getConf() ProxyConf { } func writeConf() { - var ( - cfg *tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG - err error - ) + proxy := "" + proxyByPass := "" + autoConfigUrl := "" + autoDetect := false - if cfg, err = getUserConfigFromWindowsSyscall(); err != nil { + // Try from IE first. + if ieCfg, err := getUserConfigFromWindowsSyscall(); err == nil { + defer globalFreeWrapper(ieCfg.lpszProxy) + defer globalFreeWrapper(ieCfg.lpszProxyBypass) + defer globalFreeWrapper(ieCfg.lpszAutoConfigUrl) + + proxy = StringFromUTF16Ptr(ieCfg.lpszProxy) + proxyByPass = StringFromUTF16Ptr(ieCfg.lpszProxyBypass) + autoConfigUrl = StringFromUTF16Ptr(ieCfg.lpszAutoConfigUrl) + autoDetect = ieCfg.fAutoDetect + } + + if proxy == "" { // only fall back if IE supplied no proxy + // Try the WinHTTP default proxy.
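// An illustrative aside on the flow being built here: the resolution order
// is (1) the current user's IE proxy configuration, (2) the machine-wide
// WinHTTP default proxy, and (3) the registry / manual-detection fallback
// below, each stage running only when the previous one produced no proxy
// (IE's auto-detect flag also short-circuits the final fallback). As a
// condensed sketch, with fromIE, fromWinHTTP and fromRegistry standing in
// hypothetically for the helpers in this file:
//
//	func resolveProxy() string {
//		if p := fromIE(); p != "" { // per-user IE settings win
//			return p
//		}
//		if p := fromWinHTTP(); p != "" { // then the WinHTTP default proxy
//			return p
//		}
//		return fromRegistry() // finally the registry / manual fallback
//	}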
+ if defaultCfg, err := getDefaultProxyConfiguration(); err == nil { + defer globalFreeWrapper(defaultCfg.lpszProxy) + defer globalFreeWrapper(defaultCfg.lpszProxyBypass) + + // Set these two as a pair: it doesn't make sense to take one from here while keeping the other's value from above. + newProxy := StringFromUTF16Ptr(defaultCfg.lpszProxy) + if proxy == "" { + proxy = newProxy + } + + newProxyByPass := StringFromUTF16Ptr(defaultCfg.lpszProxyBypass) + if proxyByPass == "" { + proxyByPass = newProxyByPass + } + } + } + + if proxy == "" && !autoDetect { + // Fall back to the IE registry settings, or to manual detection, if nothing was found above. regedit, _ := readRegedit() // If the syscall fails, backup to manual detection. windowsProxyConf = parseRegedit(regedit) return } - defer globalFreeWrapper(cfg.lpszProxy) - defer globalFreeWrapper(cfg.lpszProxyBypass) - defer globalFreeWrapper(cfg.lpszAutoConfigUrl) - + // Set the proxy settings. windowsProxyConf = ProxyConf{ Static: StaticProxyConf{ - Active: cfg.lpszProxy != nil, + Active: len(proxy) > 0, }, Automatic: ProxyScriptConf{ - Active: cfg.lpszAutoConfigUrl != nil || cfg.fAutoDetect, + Active: len(autoConfigUrl) > 0 || autoDetect, }, } if windowsProxyConf.Static.Active { protocol := make(map[string]string) - for _, s := range strings.Split(StringFromUTF16Ptr(cfg.lpszProxy), ";") { + for _, s := range strings.Split(proxy, ";") { s = strings.TrimSpace(s) if s == "" { continue @@ -65,31 +94,38 @@ func writeConf() { } windowsProxyConf.Static.Protocols = protocol - if cfg.lpszProxyBypass != nil { - windowsProxyConf.Static.NoProxy = strings.Replace(StringFromUTF16Ptr(cfg.lpszProxyBypass), ";", ",", -1) + if len(proxyByPass) > 0 { + windowsProxyConf.Static.NoProxy = strings.Replace(proxyByPass, ";", ",", -1) } } if windowsProxyConf.Automatic.Active { - windowsProxyConf.Automatic.PreConfiguredURL = StringFromUTF16Ptr(cfg.lpszAutoConfigUrl) + windowsProxyConf.Automatic.PreConfiguredURL = autoConfigUrl } } func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) { - handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) - if handle == 0 { - return &tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG{}, err + if err := winHttpGetIEProxyConfigForCurrentUser.Find(); err != nil { + return nil, err } - defer winHttpCloseHandle.Call(handle) - - config := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG) - - ret, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(config))) - if ret > 0 { - err = nil + p := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG) + r, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(p))) + if rTrue(r) { + return p, nil } + return nil, err } - return config, err +func getDefaultProxyConfiguration() (*tWINHTTP_PROXY_INFO, error) { + pInfo := new(tWINHTTP_PROXY_INFO) + if err := winHttpGetDefaultProxyConfiguration.Find(); err != nil { + return nil, err + } + r, _, err := winHttpGetDefaultProxyConfiguration.Call(uintptr(unsafe.Pointer(pInfo))) + if rTrue(r) { + return pInfo, nil + } + return nil, err } // OverrideEnvWithStaticProxy writes new values to the @@ -135,7 +171,27 @@ func parseRegedit(regedit regeditValues) ProxyConf { } func readRegedit() (values regeditValues, err error) { - k, err := registry.OpenKey(registry.CURRENT_USER, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) + var proxySettingsPerUser uint64 = 1 // 1 (the default) means the current user's settings are used + k, err :=
registry.OpenKey(registry.LOCAL_MACHINE, `Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) + if err == nil { + // We read into the temporary variable tempPrxUsrSettings because the Go method GetIntegerValue + // sets the value to zero even if it fails. + tempPrxUsrSettings, _, err := k.GetIntegerValue("ProxySettingsPerUser") + if err == nil { + // only take the value of tempPrxUsrSettings on success + proxySettingsPerUser = tempPrxUsrSettings + } + k.Close() + } + + var hkey registry.Key + if proxySettingsPerUser == 0 { + hkey = registry.LOCAL_MACHINE + } else { + hkey = registry.CURRENT_USER + } + + k, err = registry.OpenKey(hkey, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) if err != nil { return } diff --git a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go index cfb4349e2..30ebbd22a 100644 --- a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go +++ b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go @@ -13,3 +13,7 @@ func globalFreeWrapper(ptr *uint16) { _, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr))) } } + +func rTrue(r uintptr) bool { + return r == 1 +} diff --git a/vendor/github.com/mattn/go-ieproxy/GetProxyFunc.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go similarity index 100% rename from vendor/github.com/mattn/go-ieproxy/GetProxyFunc.go rename to vendor/github.com/mattn/go-ieproxy/proxy_middleman.go diff --git a/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go similarity index 100% rename from vendor/github.com/mattn/go-ieproxy/proxyMiddleman_unix.go rename to vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go diff --git a/vendor/github.com/mattn/go-ieproxy/proxyMiddleman_windows.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go similarity index 100% rename from vendor/github.com/mattn/go-ieproxy/proxyMiddleman_windows.go rename to vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go diff --git a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go index 560940df8..4d3b16778 100644 --- a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go +++ b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go @@ -7,6 +7,7 @@ var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl") var winHttpOpen = winHttp.NewProc("WinHttpOpen") var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle") var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser") +var winHttpGetDefaultProxyConfiguration = winHttp.NewProc("WinHttpGetDefaultProxyConfiguration") type tWINHTTP_AUTOPROXY_OPTIONS struct { dwFlags autoProxyFlag diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml index 5597e026d..604314dd4 100644 --- a/vendor/github.com/mattn/go-isatty/.travis.yml +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -1,13 +1,14 @@ language: go +sudo: false go: + - 1.13.x - tip -os: - - linux - - osx - before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./...
+ script: - - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md index 1e69004bb..38418353e 100644 --- a/vendor/github.com/mattn/go-isatty/README.md +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -1,7 +1,7 @@ # go-isatty [![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) -[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Codecov](https://codecov.io/gh/mattn/go-isatty/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-isatty) [![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) [![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod index 27ff17727..605c4c221 100644 --- a/vendor/github.com/mattn/go-isatty/go.mod +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -1,3 +1,5 @@ module github.com/mattn/go-isatty -require golang.org/x/sys v0.0.0-20191008105621-543471e840be +go 1.12 + +require golang.org/x/sys v0.0.0-20200116001909-b77594299b42 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum index c141fc53a..912e29cbc 100644 --- a/vendor/github.com/mattn/go-isatty/go.sum +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -1,4 +1,2 @@ -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/mattn/go-isatty/go.test.sh b/vendor/github.com/mattn/go-isatty/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go deleted file mode 100644 index d3567cb5b..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_android.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build android - -package isatty - -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TCGETS - -// IsTerminal return true if the file descriptor is terminal. 
-func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} - -// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 -// terminal. This is also always false on this environment. -func IsCygwinTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index 07e93039d..711f28808 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -3,18 +3,12 @@ package isatty -import ( - "syscall" - "unsafe" -) - -const ioctlReadTermios = syscall.TIOCGETA +import "golang.org/x/sys/unix" // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { - var termios syscall.Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA) + return err == nil } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 453b025d0..31a1ca973 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,5 @@ // +build linux aix // +build !appengine -// +build !android package isatty diff --git a/vendor/github.com/mattn/go-isatty/renovate.json b/vendor/github.com/mattn/go-isatty/renovate.json new file mode 100644 index 000000000..5ae9d96b7 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/renovate.json @@ -0,0 +1,8 @@ +{ + "extends": [ + "config:base" + ], + "postUpdateOptions": [ + "gomodTidy" + ] +} diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml index 5c9c2a30f..6a21813a3 100644 --- a/vendor/github.com/mattn/go-runewidth/.travis.yml +++ b/vendor/github.com/mattn/go-runewidth/.travis.yml @@ -1,8 +1,16 @@ language: go +sudo: false go: + - 1.13.x - tip + before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover + - go get -t -v ./... 
+ script: - - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL + - go generate + - git diff --cached --exit-code + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-runewidth/README.mkd b/vendor/github.com/mattn/go-runewidth/README.md similarity index 82% rename from vendor/github.com/mattn/go-runewidth/README.mkd rename to vendor/github.com/mattn/go-runewidth/README.md index 66663a94b..aa56ab96c 100644 --- a/vendor/github.com/mattn/go-runewidth/README.mkd +++ b/vendor/github.com/mattn/go-runewidth/README.md @@ -2,7 +2,7 @@ go-runewidth ============ [![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth) -[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD) +[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth) [![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth) [![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth) diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh new file mode 100644 index 000000000..012162b07 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -race -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index 8d64da077..19f8e0449 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -50,7 +50,6 @@ func inTables(r rune, ts ...table) bool { } func inTable(r rune, t table) bool { - // func (t table) IncludesRune(r rune) bool { if r < t[0].first { return false } diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index 9ca6d0e28..a8ccee5b1 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -1,3 +1,5 @@ +// Code generated by script/generate.go. DO NOT EDIT. 
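The header added above follows Go's convention for marking machine-generated files (golang.org/s/generatedcode), which tools detect with a line-anchored pattern; paired with the new `go generate` plus `git diff --cached --exit-code` CI step, it keeps the checked-in table in sync with its generator. A small sketch of that detection, assuming the conventional regexp:

package main

import (
	"fmt"
	"regexp"
)

// generatedRx is the conventional pattern for generated-file headers,
// as described at golang.org/s/generatedcode.
var generatedRx = regexp.MustCompile(`^// Code generated .* DO NOT EDIT\.$`)

func main() {
	header := "// Code generated by script/generate.go. DO NOT EDIT."
	fmt.Println(generatedRx.MatchString(header)) // prints: true
}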
+ package runewidth var combining = table{ diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md index 7e678592b..838ec623e 100644 --- a/vendor/github.com/ncw/swift/README.md +++ b/vendor/github.com/ncw/swift/README.md @@ -157,3 +157,5 @@ Contributors - Jérémy Clerc - 4xicom <37339705+4xicom@users.noreply.github.com> - Bo +- Thiago da Silva +- Brandon WELSCH diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index cdb79d908..217647b9a 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -125,7 +125,7 @@ type Connection struct { Expires time.Time // time the token expires, may be Zero if unknown client *http.Client Auth Authenticator `json:"-" xml:"-"` // the current authenticator - authLock sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth + authLock *sync.Mutex // lock when R/W StorageUrl, AuthToken, Auth // swiftInfo is filled after QueryInfo is called swiftInfo SwiftInfo } @@ -458,6 +458,9 @@ func (c *Connection) setDefaults() { // If you don't call it before calling one of the connection methods // then it will be called for you on the first access. func (c *Connection) Authenticate() (err error) { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } c.authLock.Lock() defer c.authLock.Unlock() return c.authenticate() @@ -580,6 +583,9 @@ func (c *Connection) UnAuthenticate() { // // Doesn't actually check the credentials against the server. func (c *Connection) Authenticated() bool { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } c.authLock.Lock() defer c.authLock.Unlock() return c.authenticated() @@ -1485,6 +1491,22 @@ func (c *Connection) ObjectCreate(container string, objectName string, checkHash return } +func (c *Connection) ObjectSymlinkCreate(container string, symlink string, targetAccount string, targetContainer string, targetObject string, targetEtag string) (headers Headers, err error) { + + EMPTY_MD5 := "d41d8cd98f00b204e9800998ecf8427e" + symHeaders := Headers{} + contents := bytes.NewBufferString("") + if targetAccount != "" { + symHeaders["X-Symlink-Target-Account"] = targetAccount + } + if targetEtag != "" { + symHeaders["X-Symlink-Target-Etag"] = targetEtag + } + symHeaders["X-Symlink-Target"] = fmt.Sprintf("%s/%s", targetContainer, targetObject) + _, err = c.ObjectPut(container, symlink, contents, true, EMPTY_MD5, "application/symlink", symHeaders) + return +} + func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) { extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) hash := md5.New() diff --git a/vendor/github.com/nsf/termbox-go/termbox_windows.go b/vendor/github.com/nsf/termbox-go/termbox_windows.go index 22e0f9ea0..d46eb043e 100644 --- a/vendor/github.com/nsf/termbox-go/termbox_windows.go +++ b/vendor/github.com/nsf/termbox-go/termbox_windows.go @@ -771,7 +771,11 @@ func key_event_record_to_event(r *key_event_record) (Event, bool) { case vk_tab: e.Key = KeyTab case vk_enter: - e.Key = KeyEnter + if ctrlpressed { + e.Key = KeyCtrlJ + } else { + e.Key = KeyEnter + } case vk_esc: switch { case input_mode&InputEsc != 0: diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index d4b92663b..9159de03e 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -1,15 +1,10 @@ 
language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... + - make check diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 000000000..ce9d7cded --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6483ba2af..54dfdcb12 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -41,11 +41,18 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 7421f326f..161aea258 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. 
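The Unwrap methods being added here are what let pkg/errors wrappers participate in Go 1.13 error chains: Wrap produces withMessage and withStack values, and with Unwrap defined the standard library can walk through them. A minimal usage sketch (stderrors aliases the standard library errors package):

package main

import (
	stderrors "errors"
	"fmt"

	"github.com/pkg/errors"
)

var errNotFound = stderrors.New("not found")

func main() {
	// Wrap attaches a message and a stack; the new Unwrap methods expose
	// the cause to the standard library's errors.Is and errors.As.
	err := errors.Wrap(errNotFound, "loading config")
	fmt.Println(stderrors.Is(err, errNotFound)) // prints: true
}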
+func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 000000000..be0d10d0c --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 2874a048c..779a8348f 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. // // %s source file @@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. 
The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,18 +110,32 @@ func (st StackTrace) Format(s fmt.State, verb rune) { switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) } } +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + // stack represents a stack of program counters. type stack []uintptr diff --git a/vendor/github.com/pkg/sftp/.travis.yml b/vendor/github.com/pkg/sftp/.travis.yml index 3c1e05e5f..51dead078 100644 --- a/vendor/github.com/pkg/sftp/.travis.yml +++ b/vendor/github.com/pkg/sftp/.travis.yml @@ -4,25 +4,25 @@ go_import_path: github.com/pkg/sftp # current and previous stable releases, plus tip # remember to exclude previous and tip for macs below go: - - 1.11.x - 1.12.x + - 1.13.x - tip os: - linux - osx -env: - global: - - GO111MODULE=on - matrix: exclude: - os: osx - go: 1.10.x + go: 1.12.x - os: osx go: tip +env: + global: + - GO111MODULE=on + addons: ssh_known_hosts: - bitbucket.org diff --git a/vendor/github.com/pkg/sftp/Makefile b/vendor/github.com/pkg/sftp/Makefile new file mode 100644 index 000000000..781fe1f5a --- /dev/null +++ b/vendor/github.com/pkg/sftp/Makefile @@ -0,0 +1,11 @@ +integration: + go test -integration -v + go test -testserver -v + go test -integration -testserver -v + +integration_w_race: + go test -race -integration -v + go test -race -testserver -v + go test -race -integration -testserver -v + + diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go index 335bcc284..a51cd0923 100644 --- a/vendor/github.com/pkg/sftp/attrs.go +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -10,11 +10,14 @@ import ( ) const ( - ssh_FILEXFER_ATTR_SIZE = 0x00000001 - ssh_FILEXFER_ATTR_UIDGID = 0x00000002 - ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 - ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 - ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 + sshFileXferAttrSize = 0x00000001 + sshFileXferAttrUIDGID = 0x00000002 + sshFileXferAttrPermissions = 0x00000004 + sshFileXferAttrACmodTime = 0x00000008 + sshFileXferAttrExtented = 0x80000000 + + sshFileXferAttrAll = sshFileXferAttrSize | sshFileXferAttrUIDGID | sshFileXferAttrPermissions | + sshFileXferAttrACmodTime | sshFileXferAttrExtented ) // fileInfo is an artificial type designed to satisfy os.FileInfo. 
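The renamed constants above are bit flags: the leading uint32 of an ATTRS blob records which optional fields follow, in the fixed order the getFileStat hunks below read them. A standalone sketch of that layout (the local constants mirror the values above; this is not the library's code):

```go
// Simplified sketch (not the library's code) of how the renamed flag
// constants gate which fields appear in an SFTP ATTRS blob.
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	attrSize        = 0x00000001
	attrUIDGID      = 0x00000002
	attrPermissions = 0x00000004
	attrACModTime   = 0x00000008
)

func main() {
	// flags = attrSize|attrPermissions|attrACModTime, size=5, mode=0644.
	b := []byte{
		0, 0, 0, 0x0d, // flags (uint32)
		0, 0, 0, 0, 0, 0, 0, 5, // size (uint64)
		0, 0, 0x01, 0xa4, // permissions (uint32, 0644)
		0, 0, 0, 0, // atime (uint32)
		0, 0, 0, 0, // mtime (uint32)
	}
	flags := binary.BigEndian.Uint32(b)
	b = b[4:]
	if flags&attrSize != 0 {
		fmt.Println("size:", binary.BigEndian.Uint64(b))
		b = b[8:]
	}
	if flags&attrUIDGID != 0 {
		b = b[8:] // uid and gid, both uint32
	}
	if flags&attrPermissions != 0 {
		fmt.Printf("mode: %o\n", binary.BigEndian.Uint32(b))
		b = b[4:]
	}
	if flags&attrACModTime != 0 {
		fmt.Println("atime:", binary.BigEndian.Uint32(b),
			"mtime:", binary.BigEndian.Uint32(b[4:]))
	}
}
```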
@@ -77,9 +80,9 @@ func fileInfoFromStat(st *FileStat, name string) os.FileInfo { func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { mtime := fi.ModTime().Unix() atime := mtime - var flags uint32 = ssh_FILEXFER_ATTR_SIZE | - ssh_FILEXFER_ATTR_PERMISSIONS | - ssh_FILEXFER_ATTR_ACMODTIME + var flags uint32 = sshFileXferAttrSize | + sshFileXferAttrPermissions | + sshFileXferAttrACmodTime fileStat := FileStat{ Size: uint64(fi.Size()), @@ -101,31 +104,31 @@ func unmarshalAttrs(b []byte) (*FileStat, []byte) { func getFileStat(flags uint32, b []byte) (*FileStat, []byte) { var fs FileStat - if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { - fs.Size, b = unmarshalUint64(b) + if flags&sshFileXferAttrSize == sshFileXferAttrSize { + fs.Size, b, _ = unmarshalUint64Safe(b) } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.UID, b = unmarshalUint32(b) + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.UID, b, _ = unmarshalUint32Safe(b) } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.GID, b = unmarshalUint32(b) + if flags&sshFileXferAttrUIDGID == sshFileXferAttrUIDGID { + fs.GID, b, _ = unmarshalUint32Safe(b) } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { - fs.Mode, b = unmarshalUint32(b) + if flags&sshFileXferAttrPermissions == sshFileXferAttrPermissions { + fs.Mode, b, _ = unmarshalUint32Safe(b) } - if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { - fs.Atime, b = unmarshalUint32(b) - fs.Mtime, b = unmarshalUint32(b) + if flags&sshFileXferAttrACmodTime == sshFileXferAttrACmodTime { + fs.Atime, b, _ = unmarshalUint32Safe(b) + fs.Mtime, b, _ = unmarshalUint32Safe(b) } - if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { + if flags&sshFileXferAttrExtented == sshFileXferAttrExtented { var count uint32 - count, b = unmarshalUint32(b) + count, b, _ = unmarshalUint32Safe(b) ext := make([]StatExtended, count) for i := uint32(0); i < count; i++ { var typ string var data string - typ, b = unmarshalString(b) - data, b = unmarshalString(b) + typ, b, _ = unmarshalStringSafe(b) + data, b, _ = unmarshalStringSafe(b) ext[i] = StatExtended{typ, data} } fs.Extended = ext @@ -152,17 +155,17 @@ func marshalFileInfo(b []byte, fi os.FileInfo) []byte { flags, fileStat := fileStatFromInfo(fi) b = marshalUint32(b, flags) - if flags&ssh_FILEXFER_ATTR_SIZE != 0 { + if flags&sshFileXferAttrSize != 0 { b = marshalUint64(b, fileStat.Size) } - if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { + if flags&sshFileXferAttrUIDGID != 0 { b = marshalUint32(b, fileStat.UID) b = marshalUint32(b, fileStat.GID) } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { + if flags&sshFileXferAttrPermissions != 0 { b = marshalUint32(b, fileStat.Mode) } - if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { + if flags&sshFileXferAttrACmodTime != 0 { b = marshalUint32(b, fileStat.Atime) b = marshalUint32(b, fileStat.Mtime) } @@ -173,7 +176,7 @@ func marshalFileInfo(b []byte, fi os.FileInfo) []byte { // toFileMode converts sftp filemode bits to the os.FileMode specification func toFileMode(mode uint32) os.FileMode { var fm = os.FileMode(mode & 0777) - switch mode & syscall.S_IFMT { + switch mode & S_IFMT { case syscall.S_IFBLK: fm |= os.ModeDevice case syscall.S_IFCHR: diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go index 95e7922e9..846b2086d 100644 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -10,7 +10,7 @@ import ( func 
fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { if statt, ok := fi.Sys().(*syscall.Stat_t); ok { - *flags |= ssh_FILEXFER_ATTR_UIDGID + *flags |= sshFileXferAttrUIDGID fileStat.UID = statt.Uid fileStat.GID = statt.Gid } diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go index 11a81b5e0..0d09d2a2c 100644 --- a/vendor/github.com/pkg/sftp/client.go +++ b/vendor/github.com/pkg/sftp/client.go @@ -15,12 +15,18 @@ import ( "golang.org/x/crypto/ssh" ) -// InternalInconsistency indicates the packets sent and the data queued to be -// written to the file don't match up. It is an unusual error and usually is -// caused by bad behavior server side or connection issues. The error is -// limited in scope to the call where it happened, the client object is still -// OK to use as long as the connection is still open. -var InternalInconsistency = errors.New("internal inconsistency") +var ( + // ErrInternalInconsistency indicates the packets sent and the data queued to be + // written to the file don't match up. It is an unusual error and usually is + // caused by bad behavior server side or connection issues. The error is + // limited in scope to the call where it happened, the client object is still + // OK to use as long as the connection is still open. + ErrInternalInconsistency = errors.New("internal inconsistency") + // InternalInconsistency alias for ErrInternalInconsistency. + // + // Deprecated: please use ErrInternalInconsistency + InternalInconsistency = ErrInternalInconsistency +) // A ClientOption is a function which applies configuration to a Client. type ClientOption func(*Client) error @@ -45,6 +51,30 @@ func MaxPacketChecked(size int) ClientOption { } } +// UseFstat sets whether to use Fstat or Stat when File.WriteTo is called +// (usually when copying files). +// Some servers limit the amount of open files and calling Stat after opening +// the file will throw an error From the server. Setting this flag will call +// Fstat instead of Stat which is suppose to be called on an open file handle. +// +// It has been found that that with IBM Sterling SFTP servers which have +// "extractability" level set to 1 which means only 1 file can be opened at +// any given time. +// +// If the server you are working with still has an issue with both Stat and +// Fstat calls you can always open a file and read it until the end. +// +// Another reason to read the file until its end and Fstat doesn't work is +// that in some servers, reading a full file will automatically delete the +// file as some of these mainframes map the file to a message in a queue. +// Once the file has been read it will get deleted. +func UseFstat(value bool) ClientOption { + return func(c *Client) error { + c.useFstat = value + return nil + } +} + // MaxPacketUnchecked sets the maximum size of the payload, measured in bytes. // It accepts sizes larger than the 32768 bytes all servers should support. // Only use a setting higher than 32768 if your application always connects to @@ -155,12 +185,17 @@ type Client struct { maxPacket int // max packet size read or written. nextid uint32 maxConcurrentRequests int + useFstat bool } // Create creates the named file mode 0666 (before umask), truncating it if it // already exists. If successful, methods on the returned File can be used for // I/O; the associated file descriptor has mode O_RDWR. If you need more // control over the flags/mode used to open the file see client.OpenFile. +// +// Note that some SFTP servers (eg. 
AWS Transfer) do not support opening files +// read/write at the same time. For those services you will need to use +// `client.OpenFile(os.O_WRONLY|os.O_CREATE|os.O_TRUNC)`. func (c *Client) Create(path string) (*File, error) { return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) } @@ -183,8 +218,8 @@ func (c *Client) recvVersion() error { if err != nil { return err } - if typ != ssh_FXP_VERSION { - return &unexpectedPacketErr{ssh_FXP_VERSION, typ} + if typ != sshFxpVersion { + return &unexpectedPacketErr{sshFxpVersion, typ} } version, _ := unmarshalUint32(data) @@ -222,7 +257,7 @@ func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { break } switch typ { - case ssh_FXP_NAME: + case sshFxpName: sid, data := unmarshalUint32(data) if sid != id { return nil, &unexpectedIDErr{id, sid} @@ -239,7 +274,7 @@ func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { } attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) } - case ssh_FXP_STATUS: + case sshFxpStatus: // TODO(dfc) scope warning! err = normaliseError(unmarshalStatus(id, data)) done = true @@ -263,14 +298,14 @@ func (c *Client) opendir(path string) (string, error) { return "", err } switch typ { - case ssh_FXP_HANDLE: + case sshFxpHandle: sid, data := unmarshalUint32(data) if sid != id { return "", &unexpectedIDErr{id, sid} } handle, _ := unmarshalString(data) return handle, nil - case ssh_FXP_STATUS: + case sshFxpStatus: return "", normaliseError(unmarshalStatus(id, data)) default: return "", unimplementedPacketErr(typ) @@ -289,14 +324,14 @@ func (c *Client) Stat(p string) (os.FileInfo, error) { return nil, err } switch typ { - case ssh_FXP_ATTRS: + case sshFxpAttrs: sid, data := unmarshalUint32(data) if sid != id { return nil, &unexpectedIDErr{id, sid} } attr, _ := unmarshalAttrs(data) return fileInfoFromStat(attr, path.Base(p)), nil - case ssh_FXP_STATUS: + case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: return nil, unimplementedPacketErr(typ) @@ -315,14 +350,14 @@ func (c *Client) Lstat(p string) (os.FileInfo, error) { return nil, err } switch typ { - case ssh_FXP_ATTRS: + case sshFxpAttrs: sid, data := unmarshalUint32(data) if sid != id { return nil, &unexpectedIDErr{id, sid} } attr, _ := unmarshalAttrs(data) return fileInfoFromStat(attr, path.Base(p)), nil - case ssh_FXP_STATUS: + case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: return nil, unimplementedPacketErr(typ) @@ -340,7 +375,7 @@ func (c *Client) ReadLink(p string) (string, error) { return "", err } switch typ { - case ssh_FXP_NAME: + case sshFxpName: sid, data := unmarshalUint32(data) if sid != id { return "", &unexpectedIDErr{id, sid} @@ -351,13 +386,32 @@ func (c *Client) ReadLink(p string) (string, error) { } filename, _ := unmarshalString(data) // ignore dummy attributes return filename, nil - case ssh_FXP_STATUS: + case sshFxpStatus: return "", normaliseError(unmarshalStatus(id, data)) default: return "", unimplementedPacketErr(typ) } } +// Link creates a hard link at 'newname', pointing at the same inode as 'oldname' +func (c *Client) Link(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpHardlinkPacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case sshFxpStatus: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + // Symlink creates a symbolic link at 'newname', pointing at target 'oldname' func (c *Client) 
Symlink(oldname, newname string) error { id := c.nextID() @@ -370,7 +424,7 @@ func (c *Client) Symlink(oldname, newname string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -390,7 +444,7 @@ func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -404,7 +458,7 @@ func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { Mtime uint32 } attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} - return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs) + return c.setstat(path, sshFileXferAttrACmodTime, attrs) } // Chown changes the user and group owners of the named file. @@ -414,12 +468,12 @@ func (c *Client) Chown(path string, uid, gid int) error { GID uint32 } attrs := owner{uint32(uid), uint32(gid)} - return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs) + return c.setstat(path, sshFileXferAttrUIDGID, attrs) } // Chmod changes the permissions of the named file. func (c *Client) Chmod(path string, mode os.FileMode) error { - return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode)) + return c.setstat(path, sshFileXferAttrPermissions, uint32(mode)) } // Truncate sets the size of the named file. Although it may be safely assumed @@ -427,7 +481,7 @@ func (c *Client) Chmod(path string, mode os.FileMode) error { // the SFTP protocol does not specify what behavior the server should do when setting // size greater than the current size. func (c *Client) Truncate(path string, size int64) error { - return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size)) + return c.setstat(path, sshFileXferAttrSize, uint64(size)) } // Open opens the named file for reading. 
If successful, methods on the @@ -455,14 +509,14 @@ func (c *Client) open(path string, pflags uint32) (*File, error) { return nil, err } switch typ { - case ssh_FXP_HANDLE: + case sshFxpHandle: sid, data := unmarshalUint32(data) if sid != id { return nil, &unexpectedIDErr{id, sid} } handle, _ := unmarshalString(data) return &File{c: c, path: path, handle: handle}, nil - case ssh_FXP_STATUS: + case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: return nil, unimplementedPacketErr(typ) @@ -482,7 +536,7 @@ func (c *Client) close(handle string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -499,14 +553,14 @@ func (c *Client) fstat(handle string) (*FileStat, error) { return nil, err } switch typ { - case ssh_FXP_ATTRS: + case sshFxpAttrs: sid, data := unmarshalUint32(data) if sid != id { return nil, &unexpectedIDErr{id, sid} } attr, _ := unmarshalAttrs(data) return attr, nil - case ssh_FXP_STATUS: + case sshFxpStatus: return nil, normaliseError(unmarshalStatus(id, data)) default: return nil, unimplementedPacketErr(typ) @@ -530,7 +584,7 @@ func (c *Client) StatVFS(path string) (*StatVFS, error) { switch typ { // server responded with valid data - case ssh_FXP_EXTENDED_REPLY: + case sshFxpExtendedReply: var response StatVFS err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) if err != nil { @@ -540,8 +594,8 @@ func (c *Client) StatVFS(path string) (*StatVFS, error) { return &response, nil // the resquest failed - case ssh_FXP_STATUS: - return nil, errors.New(fxp(ssh_FXP_STATUS).String()) + case sshFxpStatus: + return nil, errors.New(fxp(sshFxpStatus).String()) default: return nil, unimplementedPacketErr(typ) @@ -562,7 +616,7 @@ func (c *Client) Remove(path string) error { switch err.Code { // some servers, *cough* osx *cough*, return EPERM, not ENODIR. 
// serv-u returns ssh_FX_FILE_IS_A_DIRECTORY - case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY: + case sshFxPermissionDenied, sshFxFailure, sshFxFileIsADirectory: return c.RemoveDirectory(path) } } @@ -579,7 +633,7 @@ func (c *Client) removeFile(path string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -597,7 +651,7 @@ func (c *Client) RemoveDirectory(path string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -616,7 +670,7 @@ func (c *Client) Rename(oldname, newname string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -636,7 +690,7 @@ func (c *Client) PosixRename(oldname, newname string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -653,7 +707,7 @@ func (c *Client) realpath(path string) (string, error) { return "", err } switch typ { - case ssh_FXP_NAME: + case sshFxpName: sid, data := unmarshalUint32(data) if sid != id { return "", &unexpectedIDErr{id, sid} @@ -664,7 +718,7 @@ func (c *Client) realpath(path string) (string, error) { } filename, _ := unmarshalString(data) // ignore attributes return filename, nil - case ssh_FXP_STATUS: + case sshFxpStatus: return "", normaliseError(unmarshalStatus(id, data)) default: return "", unimplementedPacketErr(typ) @@ -690,7 +744,7 @@ func (c *Client) Mkdir(path string) error { return err } switch typ { - case ssh_FXP_STATUS: + case sshFxpStatus: return normaliseError(unmarshalStatus(id, data)) default: return unimplementedPacketErr(typ) @@ -845,14 +899,14 @@ func (f *File) Read(b []byte) (int, error) { } delete(reqs, reqID) switch res.typ { - case ssh_FXP_STATUS: + case sshFxpStatus: if firstErr.err == nil || req.offset < firstErr.offset { firstErr = offsetErr{ offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data)), } } - case ssh_FXP_DATA: + case sshFxpData: l, data := unmarshalUint32(data) n := copy(req.b, data[:l]) read += n @@ -884,15 +938,26 @@ func (f *File) Read(b []byte) (int, error) { // maximise throughput for transferring the entire file (especially // over high latency links). 
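For context, a hedged usage sketch of the new UseFstat option feeding the WriteTo change below. Here conn is an assumed, already-connected *ssh.Client and the paths are hypothetical:

```go
// Hedged usage sketch (not part of the patch): with UseFstat(true),
// File.WriteTo sizes the transfer via Fstat on the open handle instead of
// Stat on the path, which helps with servers that restrict open files.
package example

import (
	"os"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func copyDown(conn *ssh.Client) error {
	client, err := sftp.NewClient(conn, sftp.UseFstat(true))
	if err != nil {
		return err
	}
	defer client.Close()

	src, err := client.Open("/remote/file") // hypothetical remote path
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create("local-copy") // hypothetical local path
	if err != nil {
		return err
	}
	defer dst.Close()

	_, err = src.WriteTo(dst) // drives the WriteTo loop shown below
	return err
}
```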
func (f *File) WriteTo(w io.Writer) (int64, error) { - fi, err := f.c.Stat(f.path) - if err != nil { - return 0, err + var fileSize uint64 + if f.c.useFstat { + fileStat, err := f.c.fstat(f.handle) + if err != nil { + return 0, err + } + fileSize = fileStat.Size + + } else { + fi, err := f.c.Stat(f.path) + if err != nil { + return 0, err + } + fileSize = uint64(fi.Size()) } + inFlight := 0 desiredInFlight := 1 offset := f.offset writeOffset := offset - fileSize := uint64(fi.Size()) // see comment on same line in Read() above ch := make(chan result, f.c.maxConcurrentRequests+1) type inflightRead struct { @@ -934,7 +999,7 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { if inFlight == 0 { if firstErr.err == nil && len(pendingWrites) > 0 { - return copied, InternalInconsistency + return copied, ErrInternalInconsistency } break } @@ -952,11 +1017,11 @@ func (f *File) WriteTo(w io.Writer) (int64, error) { } delete(reqs, reqID) switch res.typ { - case ssh_FXP_STATUS: + case sshFxpStatus: if firstErr.err == nil || req.offset < firstErr.offset { firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))} } - case ssh_FXP_DATA: + case sshFxpData: l, data := unmarshalUint32(data) if req.offset == writeOffset { nbytes, err := w.Write(data) @@ -1071,7 +1136,7 @@ func (f *File) Write(b []byte) (int, error) { continue } switch res.typ { - case ssh_FXP_STATUS: + case sshFxpStatus: id, _ := unmarshalUint32(res.data) err := normaliseError(unmarshalStatus(id, res.data)) if err != nil && firstErr == nil { @@ -1139,7 +1204,7 @@ func (f *File) ReadFrom(r io.Reader) (int64, error) { continue } switch res.typ { - case ssh_FXP_STATUS: + case sshFxpStatus: id, _ := unmarshalUint32(res.data) err := normaliseError(unmarshalStatus(id, res.data)) if err != nil && firstErr == nil { @@ -1218,11 +1283,11 @@ func normaliseError(err error) error { switch err := err.(type) { case *StatusError: switch err.Code { - case ssh_FX_EOF: + case sshFxEOF: return io.EOF - case ssh_FX_NO_SUCH_FILE: + case sshFxNoSuchFile: return os.ErrNotExist - case ssh_FX_OK: + case sshFxOk: return nil default: return err @@ -1260,24 +1325,24 @@ func flags(f int) uint32 { var out uint32 switch f & os.O_WRONLY { case os.O_WRONLY: - out |= ssh_FXF_WRITE + out |= sshFxfWrite case os.O_RDONLY: - out |= ssh_FXF_READ + out |= sshFxfRead } if f&os.O_RDWR == os.O_RDWR { - out |= ssh_FXF_READ | ssh_FXF_WRITE + out |= sshFxfRead | sshFxfWrite } if f&os.O_APPEND == os.O_APPEND { - out |= ssh_FXF_APPEND + out |= sshFxfAppend } if f&os.O_CREATE == os.O_CREATE { - out |= ssh_FXF_CREAT + out |= sshFxfCreat } if f&os.O_TRUNC == os.O_TRUNC { - out |= ssh_FXF_TRUNC + out |= sshFxfTrunc } if f&os.O_EXCL == os.O_EXCL { - out |= ssh_FXF_EXCL + out |= sshFxfExcl } return out } diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go index 2f3be10a6..45d299568 100644 --- a/vendor/github.com/pkg/sftp/packet-manager.go +++ b/vendor/github.com/pkg/sftp/packet-manager.go @@ -39,7 +39,7 @@ func newPktMgr(sender packetSender) *packetManager { } //// packet ordering -func (s *packetManager) newOrderId() uint32 { +func (s *packetManager) newOrderID() uint32 { s.packetCount++ return s.packetCount } @@ -50,10 +50,10 @@ type orderedRequest struct { } func (s *packetManager) newOrderedRequest(p requestPacket) orderedRequest { - return orderedRequest{requestPacket: p, orderid: s.newOrderId()} + return orderedRequest{requestPacket: p, orderid: s.newOrderID()} } -func (p orderedRequest) orderId() 
uint32 { return p.orderid } -func (p orderedRequest) setOrderId(oid uint32) { p.orderid = oid } +func (p orderedRequest) orderID() uint32 { return p.orderid } +func (p orderedRequest) setOrderID(oid uint32) { p.orderid = oid } type orderedResponse struct { responsePacket @@ -64,18 +64,18 @@ func (s *packetManager) newOrderedResponse(p responsePacket, id uint32, ) orderedResponse { return orderedResponse{responsePacket: p, orderid: id} } -func (p orderedResponse) orderId() uint32 { return p.orderid } -func (p orderedResponse) setOrderId(oid uint32) { p.orderid = oid } +func (p orderedResponse) orderID() uint32 { return p.orderid } +func (p orderedResponse) setOrderID(oid uint32) { p.orderid = oid } type orderedPacket interface { id() uint32 - orderId() uint32 + orderID() uint32 } type orderedPackets []orderedPacket func (o orderedPackets) Sort() { sort.Slice(o, func(i, j int) bool { - return o[i].orderId() < o[j].orderId() + return o[i].orderID() < o[j].orderID() }) } @@ -145,11 +145,11 @@ func (s *packetManager) controller() { for { select { case pkt := <-s.requests: - debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderId()) + debug("incoming id (oid): %v (%v)", pkt.id(), pkt.orderID()) s.incoming = append(s.incoming, pkt) s.incoming.Sort() case pkt := <-s.responses: - debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderId()) + debug("outgoing id (oid): %v (%v)", pkt.id(), pkt.orderID()) s.outgoing = append(s.outgoing, pkt) s.outgoing.Sort() case <-s.fini: @@ -171,7 +171,7 @@ func (s *packetManager) maybeSendPackets() { in := s.incoming[0] // debug("incoming: %v", ids(s.incoming)) // debug("outgoing: %v", ids(s.outgoing)) - if in.orderId() == out.orderId() { + if in.orderID() == out.orderID() { debug("Sending packet: %v", out.id()) s.sender.sendPacket(out.(encoding.BinaryMarshaler)) // pop off heads diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go index bcff9bf33..addd0e32a 100644 --- a/vendor/github.com/pkg/sftp/packet-typing.go +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -49,8 +49,9 @@ func (p sshFxpOpendirPacket) getPath() string { return p.Path } func (p sshFxpOpenPacket) getPath() string { return p.Path } func (p sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath } +func (p sshFxpExtendedPacketHardlink) getPath() string { return p.Oldpath } -// hasHandle +// getHandle func (p sshFxpFstatPacket) getHandle() string { return p.Handle } func (p sshFxpFsetstatPacket) getHandle() string { return p.Handle } func (p sshFxpReadPacket) getHandle() string { return p.Handle } @@ -68,6 +69,7 @@ func (p sshFxpRmdirPacket) notReadOnly() {} func (p sshFxpRenamePacket) notReadOnly() {} func (p sshFxpSymlinkPacket) notReadOnly() {} func (p sshFxpExtendedPacketPosixRename) notReadOnly() {} +func (p sshFxpExtendedPacketHardlink) notReadOnly() {} // some packets with ID are missing id() func (p sshFxpDataPacket) id() uint32 { return p.ID } @@ -82,45 +84,45 @@ func (p sshFxVersionPacket) id() uint32 { return 0 } func makePacket(p rxPacket) (requestPacket, error) { var pkt requestPacket switch p.pktType { - case ssh_FXP_INIT: + case sshFxpInit: pkt = &sshFxInitPacket{} - case ssh_FXP_LSTAT: + case sshFxpLstat: pkt = &sshFxpLstatPacket{} - case ssh_FXP_OPEN: + case sshFxpOpen: pkt = &sshFxpOpenPacket{} - case ssh_FXP_CLOSE: + case sshFxpClose: pkt = &sshFxpClosePacket{} - case ssh_FXP_READ: + case sshFxpRead: pkt = &sshFxpReadPacket{} - case ssh_FXP_WRITE: + case sshFxpWrite: pkt = &sshFxpWritePacket{} - case 
ssh_FXP_FSTAT: + case sshFxpFstat: pkt = &sshFxpFstatPacket{} - case ssh_FXP_SETSTAT: + case sshFxpSetstat: pkt = &sshFxpSetstatPacket{} - case ssh_FXP_FSETSTAT: + case sshFxpFsetstat: pkt = &sshFxpFsetstatPacket{} - case ssh_FXP_OPENDIR: + case sshFxpOpendir: pkt = &sshFxpOpendirPacket{} - case ssh_FXP_READDIR: + case sshFxpReaddir: pkt = &sshFxpReaddirPacket{} - case ssh_FXP_REMOVE: + case sshFxpRemove: pkt = &sshFxpRemovePacket{} - case ssh_FXP_MKDIR: + case sshFxpMkdir: pkt = &sshFxpMkdirPacket{} - case ssh_FXP_RMDIR: + case sshFxpRmdir: pkt = &sshFxpRmdirPacket{} - case ssh_FXP_REALPATH: + case sshFxpRealpath: pkt = &sshFxpRealpathPacket{} - case ssh_FXP_STAT: + case sshFxpStat: pkt = &sshFxpStatPacket{} - case ssh_FXP_RENAME: + case sshFxpRename: pkt = &sshFxpRenamePacket{} - case ssh_FXP_READLINK: + case sshFxpReadlink: pkt = &sshFxpReadlinkPacket{} - case ssh_FXP_SYMLINK: + case sshFxpSymlink: pkt = &sshFxpSymlinkPacket{} - case ssh_FXP_EXTENDED: + case sshFxpExtended: pkt = &sshFxpExtendedPacket{} default: return nil, errors.Errorf("unhandled packet type: %s", p.pktType) diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go index c5c32fc23..c687fd491 100644 --- a/vendor/github.com/pkg/sftp/packet.go +++ b/vendor/github.com/pkg/sftp/packet.go @@ -13,11 +13,13 @@ import ( ) var ( + errLongPacket = errors.New("packet too long") errShortPacket = errors.New("packet too short") errUnknownExtendedPacket = errors.New("unknown extended packet") ) const ( + maxMsgLength = 256 * 1024 debugDumpTxPacket = false debugDumpRxPacket = false debugDumpTxPacketBytes = false @@ -143,6 +145,10 @@ func recvPacket(r io.Reader) (uint8, []byte, error) { return 0, nil, err } l, _ := unmarshalUint32(b) + if l > maxMsgLength { + debug("recv packet %d bytes too long", l) + return 0, nil, errLongPacket + } b = make([]byte, l) if _, err := io.ReadFull(r, b); err != nil { debug("recv packet %d bytes: err %v", l, err) @@ -189,7 +195,7 @@ func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { } b := make([]byte, 0, l) - b = append(b, ssh_FXP_INIT) + b = append(b, sshFxpInit) b = marshalUint32(b, p.Version) for _, e := range p.Extensions { b = marshalString(b, e.Name) @@ -216,9 +222,11 @@ func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { type sshFxVersionPacket struct { Version uint32 - Extensions []struct { - Name, Data string - } + Extensions []sshExtensionPair +} + +type sshExtensionPair struct { + Name, Data string } func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { @@ -228,7 +236,7 @@ func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { } b := make([]byte, 0, l) - b = append(b, ssh_FXP_VERSION) + b = append(b, sshFxpVersion) b = marshalUint32(b, p.Version) for _, e := range p.Extensions { b = marshalString(b, e.Name) @@ -266,7 +274,7 @@ type sshFxpReaddirPacket struct { func (p sshFxpReaddirPacket) id() uint32 { return p.ID } func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) + return marshalIDString(sshFxpReaddir, p.ID, p.Handle) } func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { @@ -281,7 +289,7 @@ type sshFxpOpendirPacket struct { func (p sshFxpOpendirPacket) id() uint32 { return p.ID } func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) + return marshalIDString(sshFxpOpendir, p.ID, p.Path) } func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { @@ -296,7 +304,7 @@ type 
sshFxpLstatPacket struct { func (p sshFxpLstatPacket) id() uint32 { return p.ID } func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) + return marshalIDString(sshFxpLstat, p.ID, p.Path) } func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { @@ -311,7 +319,7 @@ type sshFxpStatPacket struct { func (p sshFxpStatPacket) id() uint32 { return p.ID } func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) + return marshalIDString(sshFxpStat, p.ID, p.Path) } func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { @@ -326,7 +334,7 @@ type sshFxpFstatPacket struct { func (p sshFxpFstatPacket) id() uint32 { return p.ID } func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) + return marshalIDString(sshFxpFstat, p.ID, p.Handle) } func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { @@ -341,7 +349,7 @@ type sshFxpClosePacket struct { func (p sshFxpClosePacket) id() uint32 { return p.ID } func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) + return marshalIDString(sshFxpClose, p.ID, p.Handle) } func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { @@ -356,7 +364,7 @@ type sshFxpRemovePacket struct { func (p sshFxpRemovePacket) id() uint32 { return p.ID } func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) + return marshalIDString(sshFxpRemove, p.ID, p.Filename) } func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { @@ -371,7 +379,7 @@ type sshFxpRmdirPacket struct { func (p sshFxpRmdirPacket) id() uint32 { return p.ID } func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) + return marshalIDString(sshFxpRmdir, p.ID, p.Path) } func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { @@ -392,7 +400,7 @@ func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { 4 + len(p.Linkpath) b := make([]byte, 0, l) - b = append(b, ssh_FXP_SYMLINK) + b = append(b, sshFxpSymlink) b = marshalUint32(b, p.ID) b = marshalString(b, p.Targetpath) b = marshalString(b, p.Linkpath) @@ -411,6 +419,30 @@ func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { return nil } +type sshFxpHardlinkPacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p sshFxpHardlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpHardlinkPacket) MarshalBinary() ([]byte, error) { + const ext = "hardlink@openssh.com" + l := 1 + 4 + // type(byte) + uint32 + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, sshFxpExtended) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + type sshFxpReadlinkPacket struct { ID uint32 Path string @@ -419,7 +451,7 @@ type sshFxpReadlinkPacket struct { func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) + return marshalIDString(sshFxpReadlink, p.ID, p.Path) } func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { @@ -434,7 +466,7 @@ type sshFxpRealpathPacket struct { func (p sshFxpRealpathPacket) id() uint32 { return p.ID } func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { - 
return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) + return marshalIDString(sshFxpRealpath, p.ID, p.Path) } func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { @@ -464,7 +496,7 @@ type sshFxpNamePacket struct { func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { b := []byte{} - b = append(b, ssh_FXP_NAME) + b = append(b, sshFxpName) b = marshalUint32(b, p.ID) b = marshalUint32(b, uint32(len(p.NameAttrs))) for _, na := range p.NameAttrs { @@ -493,7 +525,7 @@ func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { 4 + 4 b := make([]byte, 0, l) - b = append(b, ssh_FXP_OPEN) + b = append(b, sshFxpOpen) b = marshalUint32(b, p.ID) b = marshalString(b, p.Path) b = marshalUint32(b, p.Pflags) @@ -530,7 +562,7 @@ func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { 8 + 4 // uint64 + uint32 b := make([]byte, 0, l) - b = append(b, ssh_FXP_READ) + b = append(b, sshFxpRead) b = marshalUint32(b, p.ID) b = marshalString(b, p.Handle) b = marshalUint64(b, p.Offset) @@ -566,7 +598,7 @@ func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { 4 + len(p.Newpath) b := make([]byte, 0, l) - b = append(b, ssh_FXP_RENAME) + b = append(b, sshFxpRename) b = marshalUint32(b, p.ID) b = marshalString(b, p.Oldpath) b = marshalString(b, p.Newpath) @@ -601,7 +633,7 @@ func (p sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { 4 + len(p.Newpath) b := make([]byte, 0, l) - b = append(b, ssh_FXP_EXTENDED) + b = append(b, sshFxpExtended) b = marshalUint32(b, p.ID) b = marshalString(b, ext) b = marshalString(b, p.Oldpath) @@ -626,7 +658,7 @@ func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { len(p.Data) b := make([]byte, 0, l) - b = append(b, ssh_FXP_WRITE) + b = append(b, sshFxpWrite) b = marshalUint32(b, p.ID) b = marshalString(b, p.Handle) b = marshalUint64(b, p.Offset) @@ -667,7 +699,7 @@ func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { 4 // uint32 b := make([]byte, 0, l) - b = append(b, ssh_FXP_MKDIR) + b = append(b, sshFxpMkdir) b = marshalUint32(b, p.ID) b = marshalString(b, p.Path) b = marshalUint32(b, p.Flags) @@ -709,7 +741,7 @@ func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { 4 // uint32 + uint64 b := make([]byte, 0, l) - b = append(b, ssh_FXP_SETSTAT) + b = append(b, sshFxpSetstat) b = marshalUint32(b, p.ID) b = marshalString(b, p.Path) b = marshalUint32(b, p.Flags) @@ -723,7 +755,7 @@ func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { 4 // uint32 + uint64 b := make([]byte, 0, l) - b = append(b, ssh_FXP_FSETSTAT) + b = append(b, sshFxpFsetstat) b = marshalUint32(b, p.ID) b = marshalString(b, p.Handle) b = marshalUint32(b, p.Flags) @@ -763,7 +795,7 @@ type sshFxpHandlePacket struct { } func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_HANDLE} + b := []byte{sshFxpHandle} b = marshalUint32(b, p.ID) b = marshalString(b, p.Handle) return b, nil @@ -775,7 +807,7 @@ type sshFxpStatusPacket struct { } func (p sshFxpStatusPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_STATUS} + b := []byte{sshFxpStatus} b = marshalUint32(b, p.ID) b = marshalStatus(b, p.StatusError) return b, nil @@ -788,7 +820,7 @@ type sshFxpDataPacket struct { } func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_DATA} + b := []byte{sshFxpData} b = marshalUint32(b, p.ID) b = marshalUint32(b, p.Length) b = append(b, p.Data[:p.Length]...) 
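The new hardlink packet follows the same extended-request framing as the posix-rename and statvfs packets around it: a type byte, the request ID, the extension name, then the string arguments. A standalone sketch of the bytes MarshalBinary produces (marshalUint32/marshalString are re-implemented locally; sendPacket prepends the uint32 frame length separately; 200 is SSH_FXP_EXTENDED in the SFTP drafts):

```go
// Standalone sketch (not the library's code) of the wire bytes behind
// sshFxpHardlinkPacket.MarshalBinary.
package main

import "fmt"

const sshFxpExtended = 200 // SSH_FXP_EXTENDED per draft-ietf-secsh-filexfer

func marshalUint32(b []byte, v uint32) []byte {
	return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}

func marshalString(b []byte, s string) []byte {
	return append(marshalUint32(b, uint32(len(s))), s...)
}

func main() {
	b := []byte{sshFxpExtended} // type byte
	b = marshalUint32(b, 7)     // request ID
	b = marshalString(b, "hardlink@openssh.com")
	b = marshalString(b, "/old/path") // oldpath
	b = marshalString(b, "/new/path") // newpath
	fmt.Printf("% x\n", b)
}
```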
@@ -823,7 +855,7 @@ func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { len("statvfs@openssh.com") b := make([]byte, 0, l) - b = append(b, ssh_FXP_EXTENDED) + b = append(b, sshFxpExtended) b = marshalUint32(b, p.ID) b = marshalString(b, "statvfs@openssh.com") b = marshalString(b, p.Path) @@ -856,10 +888,10 @@ func (p *StatVFS) FreeSpace() uint64 { return p.Frsize * p.Bfree } -// Convert to ssh_FXP_EXTENDED_REPLY packet binary format +// MarshalBinary converts to ssh_FXP_EXTENDED_REPLY packet binary format func (p *StatVFS) MarshalBinary() ([]byte, error) { var buf bytes.Buffer - buf.Write([]byte{ssh_FXP_EXTENDED_REPLY}) + buf.Write([]byte{sshFxpExtendedReply}) err := binary.Write(&buf, binary.BigEndian, p) return buf.Bytes(), err } @@ -903,6 +935,8 @@ func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} case "posix-rename@openssh.com": p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} + case "hardlink@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketHardlink{} default: return errors.Wrapf(errUnknownExtendedPacket, "packet type %v", p.SpecificPacket) } @@ -957,3 +991,32 @@ func (p sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { err := os.Rename(p.Oldpath, p.Newpath) return statusFromError(p, err) } + +type sshFxpExtendedPacketHardlink struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +func (p sshFxpExtendedPacketHardlink) id() uint32 { return p.ID } +func (p sshFxpExtendedPacketHardlink) readonly() bool { return true } +func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { + err := os.Link(p.Oldpath, p.Newpath) + return statusFromError(p, err) +} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go index 31b1eaa66..7c2e5c122 100644 --- a/vendor/github.com/pkg/sftp/request-attrs.go +++ b/vendor/github.com/pkg/sftp/request-attrs.go @@ -5,7 +5,7 @@ package sftp // request and AttrFlags() and Attributes() when working with SetStat requests. import "os" -// File Open and Write Flags. Correlate directly with with os.OpenFile flags +// FileOpenFlags defines Open and Write Flags. Correlate directly with with os.OpenFile flags // (https://golang.org/pkg/os/#pkg-constants). 
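The helpers in this file decode the raw flag bitmask carried on a Request. A sketch of a Filecmd handler using AttrFlags and Attributes (both defined in the hunks below) to service a Setstat request; myFS is a hypothetical Handlers implementation with os and sftp imports assumed:

```go
// Sketch of a Filecmd handler for Setstat: apply only the attributes the
// client actually flagged. myFS is hypothetical, not part of the library.
func (fs *myFS) Filecmd(r *sftp.Request) error {
	if r.Method != "Setstat" {
		return nil // other methods elided in this sketch
	}
	flags := r.AttrFlags()  // which attributes were sent
	attrs := r.Attributes() // the decoded FileStat
	if flags.Size {
		if err := os.Truncate(r.Filepath, int64(attrs.Size)); err != nil {
			return err
		}
	}
	if flags.Permissions {
		return os.Chmod(r.Filepath, attrs.FileMode())
	}
	return nil
}
```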
type FileOpenFlags struct { Read, Write, Append, Creat, Trunc, Excl bool @@ -13,12 +13,12 @@ type FileOpenFlags struct { func newFileOpenFlags(flags uint32) FileOpenFlags { return FileOpenFlags{ - Read: flags&ssh_FXF_READ != 0, - Write: flags&ssh_FXF_WRITE != 0, - Append: flags&ssh_FXF_APPEND != 0, - Creat: flags&ssh_FXF_CREAT != 0, - Trunc: flags&ssh_FXF_TRUNC != 0, - Excl: flags&ssh_FXF_EXCL != 0, + Read: flags&sshFxfRead != 0, + Write: flags&sshFxfWrite != 0, + Append: flags&sshFxfAppend != 0, + Creat: flags&sshFxfCreat != 0, + Trunc: flags&sshFxfTrunc != 0, + Excl: flags&sshFxfExcl != 0, } } @@ -28,7 +28,7 @@ func (r *Request) Pflags() FileOpenFlags { return newFileOpenFlags(r.Flags) } -// Flags that indicate whether SFTP file attributes were passed. When a flag is +// FileAttrFlags that indicate whether SFTP file attributes were passed. When a flag is // true the corresponding attribute should be available from the FileStat // object returned by Attributes method. Used with SetStat. type FileAttrFlags struct { @@ -37,14 +37,14 @@ type FileAttrFlags struct { func newFileAttrFlags(flags uint32) FileAttrFlags { return FileAttrFlags{ - Size: (flags & ssh_FILEXFER_ATTR_SIZE) != 0, - UidGid: (flags & ssh_FILEXFER_ATTR_UIDGID) != 0, - Permissions: (flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0, - Acmodtime: (flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0, + Size: (flags & sshFileXferAttrSize) != 0, + UidGid: (flags & sshFileXferAttrUIDGID) != 0, + Permissions: (flags & sshFileXferAttrPermissions) != 0, + Acmodtime: (flags & sshFileXferAttrACmodTime) != 0, } } -// FileAttrFlags returns a FileAttrFlags boolean struct based on the +// AttrFlags returns a FileAttrFlags boolean struct based on the // bitmap/uint32 file attribute flags from the SFTP packaet. func (r *Request) AttrFlags() FileAttrFlags { return newFileAttrFlags(r.Flags) @@ -55,7 +55,7 @@ func (a FileStat) FileMode() os.FileMode { return os.FileMode(a.Mode) } -// Attributres parses file attributes byte blob and return them in a +// Attributes parses file attributes byte blob and return them in a // FileStat object. func (r *Request) Attributes() *FileStat { fs, _ := getFileStat(r.Flags, r.Attrs) diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go index 00451e74d..e4b4b86dc 100644 --- a/vendor/github.com/pkg/sftp/request-errors.go +++ b/vendor/github.com/pkg/sftp/request-errors.go @@ -1,40 +1,52 @@ package sftp +type fxerr uint32 + // Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more // direct control of the errors being sent vs. letting the library work them // out from the standard os/io errors. 
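A sketch of what that control looks like in practice: a handler can return the typed errors renamed below to choose the exact SSH_FXP_STATUS code sent to the client, rather than relying on the library's translation of os/io errors. myFS is hypothetical; io, strings, and sftp imports are assumed:

```go
// Sketch: typed status errors give the handler direct control of the
// SSH_FXP_STATUS code on the wire. myFS is a hypothetical Handlers set.
func (fs *myFS) Fileread(r *sftp.Request) (io.ReaderAt, error) {
	if strings.HasPrefix(r.Filepath, "/private/") {
		return nil, sftp.ErrSSHFxPermissionDenied // SSH_FX_PERMISSION_DENIED
	}
	return nil, sftp.ErrSSHFxNoSuchFile // SSH_FX_NO_SUCH_FILE
}
```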
- -type fxerr uint32 - const ( - ErrSshFxOk = fxerr(ssh_FX_OK) - ErrSshFxEof = fxerr(ssh_FX_EOF) - ErrSshFxNoSuchFile = fxerr(ssh_FX_NO_SUCH_FILE) - ErrSshFxPermissionDenied = fxerr(ssh_FX_PERMISSION_DENIED) - ErrSshFxFailure = fxerr(ssh_FX_FAILURE) - ErrSshFxBadMessage = fxerr(ssh_FX_BAD_MESSAGE) - ErrSshFxNoConnection = fxerr(ssh_FX_NO_CONNECTION) - ErrSshFxConnectionLost = fxerr(ssh_FX_CONNECTION_LOST) - ErrSshFxOpUnsupported = fxerr(ssh_FX_OP_UNSUPPORTED) + ErrSSHFxOk = fxerr(sshFxOk) + ErrSSHFxEOF = fxerr(sshFxEOF) + ErrSSHFxNoSuchFile = fxerr(sshFxNoSuchFile) + ErrSSHFxPermissionDenied = fxerr(sshFxPermissionDenied) + ErrSSHFxFailure = fxerr(sshFxFailure) + ErrSSHFxBadMessage = fxerr(sshFxBadMessage) + ErrSSHFxNoConnection = fxerr(sshFxNoConnection) + ErrSSHFxConnectionLost = fxerr(sshFxConnectionLost) + ErrSSHFxOpUnsupported = fxerr(sshFxOPUnsupported) +) + +// Deprecated error types, these are aliases for the new ones, please use the new ones directly +const ( + ErrSshFxOk = ErrSSHFxOk + ErrSshFxEof = ErrSSHFxEOF + ErrSshFxNoSuchFile = ErrSSHFxNoSuchFile + ErrSshFxPermissionDenied = ErrSSHFxPermissionDenied + ErrSshFxFailure = ErrSSHFxFailure + ErrSshFxBadMessage = ErrSSHFxBadMessage + ErrSshFxNoConnection = ErrSSHFxNoConnection + ErrSshFxConnectionLost = ErrSSHFxConnectionLost + ErrSshFxOpUnsupported = ErrSSHFxOpUnsupported ) func (e fxerr) Error() string { switch e { - case ErrSshFxOk: + case ErrSSHFxOk: return "OK" - case ErrSshFxEof: + case ErrSSHFxEOF: return "EOF" - case ErrSshFxNoSuchFile: + case ErrSSHFxNoSuchFile: return "No Such File" - case ErrSshFxPermissionDenied: + case ErrSSHFxPermissionDenied: return "Permission Denied" - case ErrSshFxBadMessage: + case ErrSSHFxBadMessage: return "Bad Message" - case ErrSshFxNoConnection: + case ErrSSHFxNoConnection: return "No Connection" - case ErrSshFxConnectionLost: + case ErrSSHFxConnectionLost: return "Connection Lost" - case ErrSshFxOpUnsupported: + case ErrSSHFxOpUnsupported: return "Operation Unsupported" default: return "Failure" diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go index e5abd1842..9f6ae0e69 100644 --- a/vendor/github.com/pkg/sftp/request-example.go +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -11,6 +11,7 @@ import ( "os" "path/filepath" "sort" + "strings" "sync" "syscall" "time" @@ -90,18 +91,51 @@ func (fs *root) Filecmd(r *Request) error { file.name = r.Target fs.files[r.Target] = file delete(fs.files, r.Filepath) + + if file.IsDir() { + for path, file := range fs.files { + if strings.HasPrefix(path, r.Filepath+"/") { + file.name = r.Target + path[len(r.Filepath):] + fs.files[r.Target+path[len(r.Filepath):]] = file + delete(fs.files, path) + } + } + } case "Rmdir", "Remove": - _, err := fs.fetch(filepath.Dir(r.Filepath)) + file, err := fs.fetch(filepath.Dir(r.Filepath)) if err != nil { return err } + + if file.IsDir() { + for path := range fs.files { + if strings.HasPrefix(path, r.Filepath+"/") { + return &os.PathError{ + Op: "remove", + Path: r.Filepath + "/", + Err: fmt.Errorf("directory is not empty"), + } + } + } + } + delete(fs.files, r.Filepath) + case "Mkdir": _, err := fs.fetch(filepath.Dir(r.Filepath)) if err != nil { return err } fs.files[r.Filepath] = newMemFile(r.Filepath, true) + case "Link": + file, err := fs.fetch(r.Filepath) + if err != nil { + return err + } + if file.IsDir() { + return fmt.Errorf("hard link not allowed for directory") + } + fs.files[r.Target] = file case "Symlink": _, err := fs.fetch(r.Filepath) if err != 
nil { @@ -147,15 +181,15 @@ func (fs *root) Filelist(r *Request) (ListerAt, error) { if !file.IsDir() { return nil, syscall.ENOTDIR } - ordered_names := []string{} - for fn, _ := range fs.files { + orderedNames := []string{} + for fn := range fs.files { if filepath.Dir(fn) == r.Filepath { - ordered_names = append(ordered_names, fn) + orderedNames = append(orderedNames, fn) } } - sort.Strings(ordered_names) - list := make([]os.FileInfo, len(ordered_names)) - for i, fn := range ordered_names { + sort.Strings(orderedNames) + list := make([]os.FileInfo, len(orderedNames)) + for i, fn := range orderedNames { list[i] = fs.files[fn] } return listerat(list), nil @@ -199,13 +233,15 @@ func (fs *root) fetch(path string) (*memFile, error) { // Implements os.FileInfo, Reader and Writer interfaces. // These are the 3 interfaces necessary for the Handlers. +// Implements the optional interface TransferError. type memFile struct { - name string - modtime time.Time - symlink string - isdir bool - content []byte - contentLock sync.RWMutex + name string + modtime time.Time + symlink string + isdir bool + content []byte + transferError error + contentLock sync.RWMutex } // factory to make sure modtime is set @@ -265,3 +301,7 @@ func (f *memFile) WriteAt(p []byte, off int64) (int, error) { copy(f.content[off:], p) return len(p), nil } + +func (f *memFile) TransferError(err error) { + f.transferError = err +} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go index f69cedc5c..dd224cdad 100644 --- a/vendor/github.com/pkg/sftp/request-interfaces.go +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -25,6 +25,8 @@ type FileReader interface { // The request server code will call Close() on the returned io.WriterAt // ojbect if an io.Closer type assertion succeeds. // Note in cases of an error, the error text will be sent to the client. +// Note when receiving an Append flag it is important to not open files using +// O_APPEND if you plan to use WriteAt, as they conflict. // Called for Methods: Put, Open type FileWriter interface { Filewrite(*Request) (io.WriterAt, error) @@ -32,7 +34,7 @@ type FileWriter interface { // FileCmder should return an error // Note in cases of an error, the error text will be sent to the client. 
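The Append caveat added above matters in practice: the request server writes through WriteAt with explicit offsets, so opening with O_APPEND would fight those offsets. A sketch of a Filewrite implementation that honours Creat/Trunc but deliberately ignores Append (myFS is hypothetical; os, io, and sftp imports assumed). The returned writer may additionally implement the TransferError interface added further down to learn about a connection lost mid-transfer:

```go
// Sketch of the Append caveat: never open with os.O_APPEND here, because
// the server delivers writes via WriteAt at client-supplied offsets.
// myFS is a hypothetical Handlers implementation.
func (fs *myFS) Filewrite(r *sftp.Request) (io.WriterAt, error) {
	pf := r.Pflags()
	flags := os.O_WRONLY
	if pf.Creat {
		flags |= os.O_CREATE
	}
	if pf.Trunc {
		flags |= os.O_TRUNC
	}
	// pf.Append is deliberately ignored: offsets come from the client.
	return os.OpenFile(r.Filepath, flags, 0644)
}
```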
-// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Symlink, Remove +// Called for Methods: Setstat, Rename, Rmdir, Mkdir, Link, Symlink, Remove type FileCmder interface { Filecmd(*Request) error } @@ -53,3 +55,10 @@ type FileLister interface { type ListerAt interface { ListAt([]os.FileInfo, int64) (int, error) } + +// TransferError is an optional interface that readerAt and writerAt +// can implement to be notified about the error causing Serve() to exit +// with the request still open +type TransferError interface { + TransferError(err error) +} diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go index 2e99720a9..d15f3a110 100644 --- a/vendor/github.com/pkg/sftp/request-server.go +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -116,11 +116,7 @@ func (rs *RequestServer) Serve() error { if err != nil { switch errors.Cause(err) { case errUnknownExtendedPacket: - if err := rs.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { - debug("failed to send err packet: %v", err) - rs.conn.Close() // shuts down recvPacket - break - } + // do nothing default: debug("makePacket err: %v", err) rs.conn.Close() // shuts down recvPacket @@ -137,6 +133,20 @@ func (rs *RequestServer) Serve() error { // make sure all open requests are properly closed // (eg. possible on dropped connections, client crashes, etc.) for handle, req := range rs.openRequests { + if err != nil { + req.state.RLock() + writer := req.state.writerAt + reader := req.state.readerAt + req.state.RUnlock() + if t, ok := writer.(TransferError); ok { + debug("notify error: %v to writer: %v\n", err, writer) + t.TransferError(err) + } + if t, ok := reader.(TransferError); ok { + debug("notify error: %v to reader: %v\n", err, reader) + t.TransferError(err) + } + } delete(rs.openRequests, handle) req.close() } @@ -148,10 +158,16 @@ func (rs *RequestServer) packetWorker( ctx context.Context, pktChan chan orderedRequest, ) error { for pkt := range pktChan { + if epkt, ok := pkt.requestPacket.(*sshFxpExtendedPacket); ok { + if epkt.SpecificPacket != nil { + pkt.requestPacket = epkt.SpecificPacket + } + } + var rpkt responsePacket switch pkt := pkt.requestPacket.(type) { case *sshFxInitPacket: - rpkt = sshFxVersionPacket{Version: sftpProtocolVersion} + rpkt = sshFxVersionPacket{Version: sftpProtocolVersion, Extensions: sftpExtensions} case *sshFxpClosePacket: handle := pkt.getHandle() rpkt = statusFromError(pkt, rs.closeRequest(handle)) @@ -174,6 +190,10 @@ func (rs *RequestServer) packetWorker( request = NewRequest("Stat", request.Filepath) rpkt = request.call(rs.Handlers, pkt) } + case *sshFxpExtendedPacketPosixRename: + request := NewRequest("Rename", pkt.Oldpath) + request.Target = pkt.Newpath + rpkt = request.call(rs.Handlers, pkt) case hasHandle: handle := pkt.getHandle() request, ok := rs.getRequest(handle) @@ -187,11 +207,11 @@ func (rs *RequestServer) packetWorker( rpkt = request.call(rs.Handlers, pkt) request.close() default: - return errors.Errorf("unexpected packet type %T", pkt) + rpkt = statusFromError(pkt, ErrSSHFxOpUnsupported) } rs.pktMgr.readyPacket( - rs.pktMgr.newOrderedResponse(rpkt, pkt.orderId())) + rs.pktMgr.newOrderedResponse(rpkt, pkt.orderID())) } return nil } diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go index a71a8980a..e2c586f06 100644 --- a/vendor/github.com/pkg/sftp/request-unix.go +++ b/vendor/github.com/pkg/sftp/request-unix.go @@ -14,10 +14,10 @@ func fakeFileInfoSys() interface{} { func 
testOsSys(sys interface{}) error { fstat := sys.(*FileStat) if fstat.UID != uint32(65534) { - return errors.New("Uid failed to match.") + return errors.New("Uid failed to match") } if fstat.GID != uint32(65534) { - return errors.New("Gid failed to match:") + return errors.New("Gid failed to match") } return nil } diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go index e694e5f1b..37864612f 100644 --- a/vendor/github.com/pkg/sftp/request.go +++ b/vendor/github.com/pkg/sftp/request.go @@ -18,7 +18,7 @@ var MaxFilelist int64 = 100 // Request contains the data and state for the incoming service request. type Request struct { // Get, Put, Setstat, Stat, Rename, Remove - // Rmdir, Mkdir, List, Readlink, Symlink + // Rmdir, Mkdir, List, Readlink, Link, Symlink Method string Filepath string Flags uint32 @@ -56,6 +56,8 @@ func requestFromPacket(ctx context.Context, pkt hasPath) *Request { request.Target = cleanPath(p.Newpath) case *sshFxpSymlinkPacket: request.Target = cleanPath(p.Linkpath) + case *sshFxpExtendedPacketHardlink: + request.Target = cleanPath(p.Newpath) } return request } @@ -158,7 +160,7 @@ func (r *Request) call(handlers Handlers, pkt requestPacket) responsePacket { return fileget(handlers.FileGet, r, pkt) case "Put": return fileput(handlers.FilePut, r, pkt) - case "Setstat", "Rename", "Rmdir", "Mkdir", "Symlink", "Remove": + case "Setstat", "Rename", "Rmdir", "Mkdir", "Link", "Symlink", "Remove": return filecmd(handlers.FileCmd, r, pkt) case "List": return filelist(handlers.FileList, r, pkt) @@ -378,6 +380,8 @@ func requestMethod(p requestPacket) (method string) { method = "Readlink" case *sshFxpMkdirPacket: method = "Mkdir" + case *sshFxpExtendedPacketHardlink: + method = "Link" } return method } diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index 0fac1b668..905b75c29 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -18,6 +18,7 @@ import ( ) const ( + // SftpServerWorkerCount defines the number of workers for the SFTP server SftpServerWorkerCount = 8 ) @@ -141,7 +142,7 @@ func (svr *Server) sftpServerWorker(pktChan chan orderedRequest) error { if !readonly && svr.readOnly { svr.sendPacket(orderedResponse{ responsePacket: statusFromError(pkt, syscall.EPERM), - orderid: pkt.orderId()}) + orderid: pkt.orderID()}) continue } @@ -156,7 +157,10 @@ func handlePacket(s *Server, p orderedRequest) error { var rpkt responsePacket switch p := p.requestPacket.(type) { case *sshFxInitPacket: - rpkt = sshFxVersionPacket{Version: sftpProtocolVersion} + rpkt = sshFxVersionPacket{ + Version: sftpProtocolVersion, + Extensions: sftpExtensions, + } case *sshFxpStatPacket: // stat the requested file info, err := os.Stat(p.Path) @@ -246,7 +250,7 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = sshFxpOpenPacket{ ID: p.ID, Path: p.Path, - Pflags: ssh_FXF_READ, + Pflags: sshFxfRead, }.respond(s) } case *sshFxpReadPacket: @@ -276,13 +280,19 @@ func handlePacket(s *Server, p orderedRequest) error { _, err = f.WriteAt(p.Data, int64(p.Offset)) } rpkt = statusFromError(p, err) + case *sshFxpExtendedPacket: + if p.SpecificPacket == nil { + rpkt = statusFromError(p, ErrSSHFxOpUnsupported) + } else { + rpkt = p.respond(s) + } case serverRespondablePacket: rpkt = p.respond(s) default: return errors.Errorf("unexpected packet type %T", p) } - s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, p.orderId())) + s.pktMgr.readyPacket(s.pktMgr.newOrderedResponse(rpkt, 
p.orderID())) return nil } @@ -315,11 +325,11 @@ func (svr *Server) Serve() error { if err != nil { switch errors.Cause(err) { case errUnknownExtendedPacket: - if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { - debug("failed to send err packet: %v", err) - svr.conn.Close() // shuts down recvPacket - break - } + //if err := svr.serverConn.sendError(pkt, ErrSshFxOpUnsupported); err != nil { + // debug("failed to send err packet: %v", err) + // svr.conn.Close() // shuts down recvPacket + // break + //} default: debug("makePacket err: %v", err) svr.conn.Close() // shuts down recvPacket @@ -354,7 +364,7 @@ type sshFxpStatResponse struct { } func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_ATTRS} + b := []byte{sshFxpAttrs} b = marshalUint32(b, p.ID) b = marshalFileInfo(b, p.info) return b, nil @@ -363,7 +373,7 @@ func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { var emptyFileStat = []interface{}{uint32(0)} func (p sshFxpOpenPacket) readonly() bool { - return !p.hasPflags(ssh_FXF_WRITE) + return !p.hasPflags(sshFxfWrite) } func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { @@ -377,27 +387,27 @@ func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { func (p sshFxpOpenPacket) respond(svr *Server) responsePacket { var osFlags int - if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { + if p.hasPflags(sshFxfRead, sshFxfWrite) { osFlags |= os.O_RDWR - } else if p.hasPflags(ssh_FXF_WRITE) { + } else if p.hasPflags(sshFxfWrite) { osFlags |= os.O_WRONLY - } else if p.hasPflags(ssh_FXF_READ) { + } else if p.hasPflags(sshFxfRead) { osFlags |= os.O_RDONLY } else { // how are they opening? return statusFromError(p, syscall.EINVAL) } - if p.hasPflags(ssh_FXF_APPEND) { - osFlags |= os.O_APPEND - } - if p.hasPflags(ssh_FXF_CREAT) { + // Don't use O_APPEND flag as it conflicts with WriteAt. + // The sshFxfAppend flag is a no-op here as the client sends the offsets. 
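That comment is backed by the standard library's own behaviour on current Go versions, which refuses WriteAt on append-mode files; a runnable sketch:

```go
// Sketch of why the server-side O_APPEND flag was dropped: Go rejects
// WriteAt on files opened for append, and the SFTP server services every
// write packet with WriteAt at a client-supplied offset.
package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.OpenFile("demo.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	_, err = f.WriteAt([]byte("x"), 0)
	fmt.Println(err) // "os: invalid use of WriteAt on file opened with O_APPEND"
}
```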
+ + if p.hasPflags(sshFxfCreat) { osFlags |= os.O_CREATE } - if p.hasPflags(ssh_FXF_TRUNC) { + if p.hasPflags(sshFxfTrunc) { osFlags |= os.O_TRUNC } - if p.hasPflags(ssh_FXF_EXCL) { + if p.hasPflags(sshFxfExcl) { osFlags |= os.O_EXCL } @@ -439,19 +449,19 @@ func (p sshFxpSetstatPacket) respond(svr *Server) responsePacket { var err error debug("setstat name \"%s\"", p.Path) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + if (p.Flags & sshFileXferAttrSize) != 0 { var size uint64 if size, b, err = unmarshalUint64Safe(b); err == nil { err = os.Truncate(p.Path, int64(size)) } } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + if (p.Flags & sshFileXferAttrPermissions) != 0 { var mode uint32 if mode, b, err = unmarshalUint32Safe(b); err == nil { err = os.Chmod(p.Path, os.FileMode(mode)) } } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + if (p.Flags & sshFileXferAttrACmodTime) != 0 { var atime uint32 var mtime uint32 if atime, b, err = unmarshalUint32Safe(b); err != nil { @@ -462,7 +472,7 @@ func (p sshFxpSetstatPacket) respond(svr *Server) responsePacket { err = os.Chtimes(p.Path, atimeT, mtimeT) } } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + if (p.Flags & sshFileXferAttrUIDGID) != 0 { var uid uint32 var gid uint32 if uid, b, err = unmarshalUint32Safe(b); err != nil { @@ -486,19 +496,19 @@ func (p sshFxpFsetstatPacket) respond(svr *Server) responsePacket { var err error debug("fsetstat name \"%s\"", f.Name()) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + if (p.Flags & sshFileXferAttrSize) != 0 { var size uint64 if size, b, err = unmarshalUint64Safe(b); err == nil { err = f.Truncate(int64(size)) } } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + if (p.Flags & sshFileXferAttrPermissions) != 0 { var mode uint32 if mode, b, err = unmarshalUint32Safe(b); err == nil { err = f.Chmod(os.FileMode(mode)) } } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + if (p.Flags & sshFileXferAttrACmodTime) != 0 { var atime uint32 var mtime uint32 if atime, b, err = unmarshalUint32Safe(b); err != nil { @@ -509,7 +519,7 @@ func (p sshFxpFsetstatPacket) respond(svr *Server) responsePacket { err = os.Chtimes(f.Name(), atimeT, mtimeT) } } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + if (p.Flags & sshFileXferAttrUIDGID) != 0 { var uid uint32 var gid uint32 if uid, b, err = unmarshalUint32Safe(b); err != nil { @@ -526,30 +536,30 @@ func (p sshFxpFsetstatPacket) respond(svr *Server) responsePacket { func translateErrno(errno syscall.Errno) uint32 { switch errno { case 0: - return ssh_FX_OK + return sshFxOk case syscall.ENOENT: - return ssh_FX_NO_SUCH_FILE + return sshFxNoSuchFile case syscall.EPERM: - return ssh_FX_PERMISSION_DENIED + return sshFxPermissionDenied } - return ssh_FX_FAILURE + return sshFxFailure } func statusFromError(p ider, err error) sshFxpStatusPacket { ret := sshFxpStatusPacket{ ID: p.id(), StatusError: StatusError{ - // ssh_FX_OK = 0 - // ssh_FX_EOF = 1 - // ssh_FX_NO_SUCH_FILE = 2 ENOENT - // ssh_FX_PERMISSION_DENIED = 3 - // ssh_FX_FAILURE = 4 - // ssh_FX_BAD_MESSAGE = 5 - // ssh_FX_NO_CONNECTION = 6 - // ssh_FX_CONNECTION_LOST = 7 - // ssh_FX_OP_UNSUPPORTED = 8 - Code: ssh_FX_OK, + // sshFXOk = 0 + // sshFXEOF = 1 + // sshFXNoSuchFile = 2 ENOENT + // sshFXPermissionDenied = 3 + // sshFXFailure = 4 + // sshFXBadMessage = 5 + // sshFXNoConnection = 6 + // sshFXConnectionLost = 7 + // sshFXOPUnsupported = 8 + Code: sshFxOk, }, } if err == nil { @@ -557,7 +567,7 @@ func statusFromError(p ider, err error) sshFxpStatusPacket { } debug("statusFromError: error is 
%T %#v", err, err) - ret.StatusError.Code = ssh_FX_FAILURE + ret.StatusError.Code = sshFxFailure ret.StatusError.msg = err.Error() switch e := err.(type) { @@ -573,9 +583,9 @@ func statusFromError(p ider, err error) sshFxpStatusPacket { default: switch e { case io.EOF: - ret.StatusError.Code = ssh_FX_EOF + ret.StatusError.Code = sshFxEOF case os.ErrNotExist: - ret.StatusError.Code = ssh_FX_NO_SUCH_FILE + ret.StatusError.Code = sshFxNoSuchFile } } diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go index 3cdb14df8..28b1a08e1 100644 --- a/vendor/github.com/pkg/sftp/sftp.go +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -9,139 +9,148 @@ import ( ) const ( - ssh_FXP_INIT = 1 - ssh_FXP_VERSION = 2 - ssh_FXP_OPEN = 3 - ssh_FXP_CLOSE = 4 - ssh_FXP_READ = 5 - ssh_FXP_WRITE = 6 - ssh_FXP_LSTAT = 7 - ssh_FXP_FSTAT = 8 - ssh_FXP_SETSTAT = 9 - ssh_FXP_FSETSTAT = 10 - ssh_FXP_OPENDIR = 11 - ssh_FXP_READDIR = 12 - ssh_FXP_REMOVE = 13 - ssh_FXP_MKDIR = 14 - ssh_FXP_RMDIR = 15 - ssh_FXP_REALPATH = 16 - ssh_FXP_STAT = 17 - ssh_FXP_RENAME = 18 - ssh_FXP_READLINK = 19 - ssh_FXP_SYMLINK = 20 - ssh_FXP_STATUS = 101 - ssh_FXP_HANDLE = 102 - ssh_FXP_DATA = 103 - ssh_FXP_NAME = 104 - ssh_FXP_ATTRS = 105 - ssh_FXP_EXTENDED = 200 - ssh_FXP_EXTENDED_REPLY = 201 + sshFxpInit = 1 + sshFxpVersion = 2 + sshFxpOpen = 3 + sshFxpClose = 4 + sshFxpRead = 5 + sshFxpWrite = 6 + sshFxpLstat = 7 + sshFxpFstat = 8 + sshFxpSetstat = 9 + sshFxpFsetstat = 10 + sshFxpOpendir = 11 + sshFxpReaddir = 12 + sshFxpRemove = 13 + sshFxpMkdir = 14 + sshFxpRmdir = 15 + sshFxpRealpath = 16 + sshFxpStat = 17 + sshFxpRename = 18 + sshFxpReadlink = 19 + sshFxpSymlink = 20 + sshFxpStatus = 101 + sshFxpHandle = 102 + sshFxpData = 103 + sshFxpName = 104 + sshFxpAttrs = 105 + sshFxpExtended = 200 + sshFxpExtendedReply = 201 ) const ( - ssh_FX_OK = 0 - ssh_FX_EOF = 1 - ssh_FX_NO_SUCH_FILE = 2 - ssh_FX_PERMISSION_DENIED = 3 - ssh_FX_FAILURE = 4 - ssh_FX_BAD_MESSAGE = 5 - ssh_FX_NO_CONNECTION = 6 - ssh_FX_CONNECTION_LOST = 7 - ssh_FX_OP_UNSUPPORTED = 8 + sshFxOk = 0 + sshFxEOF = 1 + sshFxNoSuchFile = 2 + sshFxPermissionDenied = 3 + sshFxFailure = 4 + sshFxBadMessage = 5 + sshFxNoConnection = 6 + sshFxConnectionLost = 7 + sshFxOPUnsupported = 8 // see draft-ietf-secsh-filexfer-13 // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 - ssh_FX_INVALID_HANDLE = 9 - ssh_FX_NO_SUCH_PATH = 10 - ssh_FX_FILE_ALREADY_EXISTS = 11 - ssh_FX_WRITE_PROTECT = 12 - ssh_FX_NO_MEDIA = 13 - ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 - ssh_FX_QUOTA_EXCEEDED = 15 - ssh_FX_UNKNOWN_PRINCIPAL = 16 - ssh_FX_LOCK_CONFLICT = 17 - ssh_FX_DIR_NOT_EMPTY = 18 - ssh_FX_NOT_A_DIRECTORY = 19 - ssh_FX_INVALID_FILENAME = 20 - ssh_FX_LINK_LOOP = 21 - ssh_FX_CANNOT_DELETE = 22 - ssh_FX_INVALID_PARAMETER = 23 - ssh_FX_FILE_IS_A_DIRECTORY = 24 - ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 - ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 - ssh_FX_DELETE_PENDING = 27 - ssh_FX_FILE_CORRUPT = 28 - ssh_FX_OWNER_INVALID = 29 - ssh_FX_GROUP_INVALID = 30 - ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 + sshFxInvalidHandle = 9 + sshFxNoSuchPath = 10 + sshFxFileAlreadyExists = 11 + sshFxWriteProtect = 12 + sshFxNoMedia = 13 + sshFxNoSpaceOnFilesystem = 14 + sshFxQuotaExceeded = 15 + sshFxUnlnownPrincipal = 16 + sshFxLockConflict = 17 + sshFxDitNotEmpty = 18 + sshFxNotADirectory = 19 + sshFxInvalidFilename = 20 + sshFxLinkLoop = 21 + sshFxCannotDelete = 22 + sshFxInvalidParameter = 23 + sshFxFileIsADirectory = 24 + sshFxByteRangeLockConflict = 25 + sshFxByteRangeLockRefused = 26 + 
sshFxDeletePending = 27 + sshFxFileCorrupt = 28 + sshFxOwnerInvalid = 29 + sshFxGroupInvalid = 30 + sshFxNoMatchingByteRangeLock = 31 ) const ( - ssh_FXF_READ = 0x00000001 - ssh_FXF_WRITE = 0x00000002 - ssh_FXF_APPEND = 0x00000004 - ssh_FXF_CREAT = 0x00000008 - ssh_FXF_TRUNC = 0x00000010 - ssh_FXF_EXCL = 0x00000020 + sshFxfRead = 0x00000001 + sshFxfWrite = 0x00000002 + sshFxfAppend = 0x00000004 + sshFxfCreat = 0x00000008 + sshFxfTrunc = 0x00000010 + sshFxfExcl = 0x00000020 +) + +var ( + // supportedSFTPExtensions defines the supported extensions + supportedSFTPExtensions = []sshExtensionPair{ + {"hardlink@openssh.com", "1"}, + {"posix-rename@openssh.com", "1"}, + } + sftpExtensions = supportedSFTPExtensions ) type fxp uint8 func (f fxp) String() string { switch f { - case ssh_FXP_INIT: + case sshFxpInit: return "SSH_FXP_INIT" - case ssh_FXP_VERSION: + case sshFxpVersion: return "SSH_FXP_VERSION" - case ssh_FXP_OPEN: + case sshFxpOpen: return "SSH_FXP_OPEN" - case ssh_FXP_CLOSE: + case sshFxpClose: return "SSH_FXP_CLOSE" - case ssh_FXP_READ: + case sshFxpRead: return "SSH_FXP_READ" - case ssh_FXP_WRITE: + case sshFxpWrite: return "SSH_FXP_WRITE" - case ssh_FXP_LSTAT: + case sshFxpLstat: return "SSH_FXP_LSTAT" - case ssh_FXP_FSTAT: + case sshFxpFstat: return "SSH_FXP_FSTAT" - case ssh_FXP_SETSTAT: + case sshFxpSetstat: return "SSH_FXP_SETSTAT" - case ssh_FXP_FSETSTAT: + case sshFxpFsetstat: return "SSH_FXP_FSETSTAT" - case ssh_FXP_OPENDIR: + case sshFxpOpendir: return "SSH_FXP_OPENDIR" - case ssh_FXP_READDIR: + case sshFxpReaddir: return "SSH_FXP_READDIR" - case ssh_FXP_REMOVE: + case sshFxpRemove: return "SSH_FXP_REMOVE" - case ssh_FXP_MKDIR: + case sshFxpMkdir: return "SSH_FXP_MKDIR" - case ssh_FXP_RMDIR: + case sshFxpRmdir: return "SSH_FXP_RMDIR" - case ssh_FXP_REALPATH: + case sshFxpRealpath: return "SSH_FXP_REALPATH" - case ssh_FXP_STAT: + case sshFxpStat: return "SSH_FXP_STAT" - case ssh_FXP_RENAME: + case sshFxpRename: return "SSH_FXP_RENAME" - case ssh_FXP_READLINK: + case sshFxpReadlink: return "SSH_FXP_READLINK" - case ssh_FXP_SYMLINK: + case sshFxpSymlink: return "SSH_FXP_SYMLINK" - case ssh_FXP_STATUS: + case sshFxpStatus: return "SSH_FXP_STATUS" - case ssh_FXP_HANDLE: + case sshFxpHandle: return "SSH_FXP_HANDLE" - case ssh_FXP_DATA: + case sshFxpData: return "SSH_FXP_DATA" - case ssh_FXP_NAME: + case sshFxpName: return "SSH_FXP_NAME" - case ssh_FXP_ATTRS: + case sshFxpAttrs: return "SSH_FXP_ATTRS" - case ssh_FXP_EXTENDED: + case sshFxpExtended: return "SSH_FXP_EXTENDED" - case ssh_FXP_EXTENDED_REPLY: + case sshFxpExtendedReply: return "SSH_FXP_EXTENDED_REPLY" default: return "unknown" @@ -152,23 +161,23 @@ type fx uint8 func (f fx) String() string { switch f { - case ssh_FX_OK: + case sshFxOk: return "SSH_FX_OK" - case ssh_FX_EOF: + case sshFxEOF: return "SSH_FX_EOF" - case ssh_FX_NO_SUCH_FILE: + case sshFxNoSuchFile: return "SSH_FX_NO_SUCH_FILE" - case ssh_FX_PERMISSION_DENIED: + case sshFxPermissionDenied: return "SSH_FX_PERMISSION_DENIED" - case ssh_FX_FAILURE: + case sshFxFailure: return "SSH_FX_FAILURE" - case ssh_FX_BAD_MESSAGE: + case sshFxBadMessage: return "SSH_FX_BAD_MESSAGE" - case ssh_FX_NO_CONNECTION: + case sshFxNoConnection: return "SSH_FX_NO_CONNECTION" - case ssh_FX_CONNECTION_LOST: + case sshFxConnectionLost: return "SSH_FX_CONNECTION_LOST" - case ssh_FX_OP_UNSUPPORTED: + case sshFxOPUnsupported: return "SSH_FX_OP_UNSUPPORTED" default: return "unknown" @@ -214,4 +223,37 @@ type StatusError struct { msg, lang string } -func (s *StatusError) Error() string { 
return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } +func (s *StatusError) Error() string { + return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) +} + +// FxCode returns the error code typed to match against the exported codes +func (s *StatusError) FxCode() fxerr { + return fxerr(s.Code) +} + +func getSupportedExtensionByName(extensionName string) (sshExtensionPair, error) { + for _, supportedExtension := range supportedSFTPExtensions { + if supportedExtension.Name == extensionName { + return supportedExtension, nil + } + } + return sshExtensionPair{}, fmt.Errorf("Unsupported extension: %v", extensionName) +} + +// SetSFTPExtensions allows to customize the supported server extensions. +// See the variable supportedSFTPExtensions for supported extensions. +// This method accepts a slice of sshExtensionPair names for example 'hardlink@openssh.com'. +// If an invalid extension is given an error will be returned and nothing will be changed +func SetSFTPExtensions(extensions ...string) error { + tempExtensions := []sshExtensionPair{} + for _, extension := range extensions { + sftpExtension, err := getSupportedExtensionByName(extension) + if err != nil { + return err + } + tempExtensions = append(tempExtensions, sftpExtension) + } + sftpExtensions = tempExtensions + return nil +} diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go new file mode 100644 index 000000000..d40457776 --- /dev/null +++ b/vendor/github.com/pkg/sftp/syscall_fixed.go @@ -0,0 +1,9 @@ +// +build plan9 windows js,wasm + +// Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of +// 0xf000. None of the the other S_IFxyz values include the "1" (in 0x1f000) +// which prevents them from matching the bitmask. + +package sftp + +const S_IFMT = 0xf000 diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go new file mode 100644 index 000000000..4c2b240cf --- /dev/null +++ b/vendor/github.com/pkg/sftp/syscall_good.go @@ -0,0 +1,8 @@ +// +build !plan9,!windows +// +build !js !wasm + +package sftp + +import "syscall" + +const S_IFMT = syscall.S_IFMT diff --git a/vendor/github.com/putdotio/go-putio/LICENSE b/vendor/github.com/putdotio/go-putio/putio/LICENSE similarity index 100% rename from vendor/github.com/putdotio/go-putio/LICENSE rename to vendor/github.com/putdotio/go-putio/putio/LICENSE diff --git a/vendor/github.com/putdotio/go-putio/putio/client.go b/vendor/github.com/putdotio/go-putio/putio/client.go index ec0d9d7a8..e6f263a04 100644 --- a/vendor/github.com/putdotio/go-putio/putio/client.go +++ b/vendor/github.com/putdotio/go-putio/putio/client.go @@ -3,7 +3,6 @@ package putio import ( "context" "encoding/json" - "fmt" "io" "io/ioutil" "net/http" @@ -31,6 +30,9 @@ type Client struct { // User agent for client UserAgent string + // Override host header for API requests + Host string + // ExtraHeaders are passed to the API server on every request. ExtraHeaders http.Header @@ -79,7 +81,8 @@ func (c *Client) ValidateToken(ctx context.Context) (userID *int64, err error) { var r struct { UserID *int64 `json:"user_id"` } - _, err = c.Do(req, &r) + resp, err := c.Do(req, &r) + defer resp.Body.Close() return r.UserID, err } @@ -109,6 +112,10 @@ func (c *Client) NewRequest(ctx context.Context, method, relURL string, body io. 
req.Header.Set("Accept", defaultMediaType) req.Header.Set("User-Agent", c.UserAgent) + if c.Host != "" { + req.Host = c.Host + } + // merge headers with extra headers for header, values := range c.ExtraHeaders { for _, value := range values { @@ -156,25 +163,22 @@ func (c *Client) Do(r *http.Request, v interface{}) (*http.Response, error) { // status code is not in success range, it will try to return a structured // error. func checkResponse(r *http.Response) error { - status := r.StatusCode - switch { - case status >= 200 && status <= 399: + if r.StatusCode >= 200 && r.StatusCode <= 399 { return nil - case status >= 400 && status <= 599: - // server returns json - default: - return fmt.Errorf("unexpected status code: %d", status) } - errorResponse := &ErrorResponse{Response: r} - data, err := ioutil.ReadAll(r.Body) - if err != nil { - return fmt.Errorf("body read error: %s. status: %v. Details: %v:", err, status, string(data[:250])) + + // server possibly returns json and more details + er := &ErrorResponse{Response: r} + + er.Body, er.ParseError = ioutil.ReadAll(r.Body) + if er.ParseError != nil { + return er } - if len(data) > 0 { - err = json.Unmarshal(data, errorResponse) - if err != nil { - return fmt.Errorf("json decode error: %s. status: %v. Details: %v:", err, status, string(data[:250])) + if r.Header.Get("content-type") == "application/json" { + er.ParseError = json.Unmarshal(er.Body, er) + if er.ParseError != nil { + return er } } - return errorResponse + return er } diff --git a/vendor/github.com/putdotio/go-putio/putio/error.go b/vendor/github.com/putdotio/go-putio/putio/error.go index 659727806..5fbcd60b5 100644 --- a/vendor/github.com/putdotio/go-putio/putio/error.go +++ b/vendor/github.com/putdotio/go-putio/putio/error.go @@ -7,19 +7,31 @@ import ( // ErrorResponse reports the error caused by an API request. type ErrorResponse struct { + // Original http.Response Response *http.Response `json:"-"` + // Body read from Response + Body []byte `json:"-"` + + // Error while parsing the response + ParseError error + + // These fileds are parsed from response if JSON. Message string `json:"error_message"` Type string `json:"error_type"` } func (e *ErrorResponse) Error() string { + if e.ParseError != nil { + return fmt.Errorf("cannot parse response. code:%d error:%q body:%q", + e.Response.StatusCode, e.ParseError.Error(), string(e.Body[:250])).Error() + } return fmt.Sprintf( - "Type: %v Message: %q. Original error: %v %v: %v", + "putio error. code:%d type:%q message:%q request:%v %v", + e.Response.StatusCode, e.Type, e.Message, e.Response.Request.Method, e.Response.Request.URL, - e.Response.Status, ) } diff --git a/vendor/github.com/putdotio/go-putio/putio/events.go b/vendor/github.com/putdotio/go-putio/putio/events.go index 0b0db2fe6..b7605a462 100644 --- a/vendor/github.com/putdotio/go-putio/putio/events.go +++ b/vendor/github.com/putdotio/go-putio/putio/events.go @@ -24,7 +24,6 @@ func (e *EventsService) List(ctx context.Context) ([]Event, error) { return nil, err } return r.Events, nil - } // Delete Clears all all dashboard events. 
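The rewritten checkResponse and ErrorResponse above return a structured
*ErrorResponse for every non-success status instead of flattening everything
into fmt.Errorf strings, preserving the HTTP response, the raw body, and any
JSON decoding failure. A minimal, hypothetical caller-side sketch (the token
and file ID are invented placeholders; only the ErrorResponse fields come
from this diff):

package main

import (
	"context"
	"fmt"

	"github.com/putdotio/go-putio/putio"
	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "dummy-token"})
	client := putio.NewClient(oauth2.NewClient(context.Background(), src))

	_, err := client.Files.Get(context.Background(), 42) // hypothetical file ID
	if perr, ok := err.(*putio.ErrorResponse); ok {
		fmt.Println("status:", perr.Response.StatusCode)
		fmt.Println("type:", perr.Type, "message:", perr.Message)
		if perr.ParseError != nil {
			// Body is kept even when it is not valid JSON.
			fmt.Printf("unparsed body: %q\n", perr.Body)
		}
	}
}

Because Body and ParseError are preserved, callers can log the exact server
reply even when the API returns HTML or an empty body rather than JSON.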
diff --git a/vendor/github.com/putdotio/go-putio/putio/files.go b/vendor/github.com/putdotio/go-putio/putio/files.go index 079ab493b..55c447992 100644 --- a/vendor/github.com/putdotio/go-putio/putio/files.go +++ b/vendor/github.com/putdotio/go-putio/putio/files.go @@ -202,7 +202,7 @@ func (f *FilesService) Move(ctx context.Context, parent int64, files ...int64) e // Upload reads from given io.Reader and uploads the file contents to Put.io // servers under directory given by parent. If parent is negative, user's -// prefered folder is used. +// preferred folder is used. // // If the uploaded file is a torrent file, Put.io will interpret it as a // transfer and Transfer field will be present to represent the status of the @@ -219,7 +219,7 @@ func (f *FilesService) Upload(ctx context.Context, r io.Reader, filename string, var buf bytes.Buffer mw := multipart.NewWriter(&buf) - // negative parent means use user's prefered download folder. + // negative parent means use user's preferred download folder. if parent >= 0 { err := mw.WriteField("parent_id", itoa(parent)) if err != nil { @@ -322,7 +322,7 @@ func (f *FilesService) sharedWith(ctx context.Context, id int64) ([]share, error return r.Shared, nil } -// Subtitles lists available subtitles for the given file for user's prefered +// Subtitles lists available subtitles for the given file for user's preferred // subtitle language. func (f *FilesService) Subtitles(ctx context.Context, id int64) ([]Subtitle, error) { req, err := f.client.NewRequest(ctx, "GET", "/v2/files/"+itoa(id)+"/subtitles", nil) diff --git a/vendor/github.com/putdotio/go-putio/putio/go.mod b/vendor/github.com/putdotio/go-putio/putio/go.mod new file mode 100644 index 000000000..fcd7c2e44 --- /dev/null +++ b/vendor/github.com/putdotio/go-putio/putio/go.mod @@ -0,0 +1,3 @@ +module github.com/putdotio/go-putio/putio + +go 1.13 diff --git a/vendor/github.com/putdotio/go-putio/putio/zips.go b/vendor/github.com/putdotio/go-putio/putio/zips.go index 66e33cc84..fced4dcdd 100644 --- a/vendor/github.com/putdotio/go-putio/putio/zips.go +++ b/vendor/github.com/putdotio/go-putio/putio/zips.go @@ -26,7 +26,6 @@ func (z *ZipsService) Get(ctx context.Context, id int64) (Zip, error) { } return r, nil - } // List lists active zip files. diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go deleted file mode 100644 index 9656c42a1..000000000 --- a/vendor/github.com/russross/blackfriday/doc.go +++ /dev/null @@ -1,32 +0,0 @@ -// Package blackfriday is a Markdown processor. -// -// It translates plain text with simple formatting rules into HTML or LaTeX. -// -// Sanitized Anchor Names -// -// Blackfriday includes an algorithm for creating sanitized anchor names -// corresponding to a given input text. This algorithm is used to create -// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The -// algorithm is specified below, so that other packages can create -// compatible anchor names and links to those anchors. -// -// The algorithm iterates over the input text, interpreted as UTF-8, -// one Unicode code point (rune) at a time. All runes that are letters (category L) -// or numbers (category N) are considered valid characters. They are mapped to -// lower case, and included in the output. All other runes are considered -// invalid characters. Invalid characters that preceed the first valid character, -// as well as invalid character that follow the last valid character -// are dropped completely. 
All other sequences of invalid characters -// between two valid characters are replaced with a single dash character '-'. -// -// SanitizedAnchorName exposes this functionality, and can be used to -// create compatible links to the anchor names generated by blackfriday. -// This algorithm is also implemented in a small standalone package at -// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients -// that want a small package and don't need full functionality of blackfriday. -package blackfriday - -// NOTE: Keep Sanitized Anchor Name algorithm in sync with package -// github.com/shurcooL/sanitized_anchor_name. -// Otherwise, users of sanitized_anchor_name will get anchor names -// that are incompatible with those generated by blackfriday. diff --git a/vendor/github.com/russross/blackfriday/go.mod b/vendor/github.com/russross/blackfriday/go.mod deleted file mode 100644 index b05561a06..000000000 --- a/vendor/github.com/russross/blackfriday/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/russross/blackfriday diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go deleted file mode 100644 index e0a6c69c9..000000000 --- a/vendor/github.com/russross/blackfriday/html.go +++ /dev/null @@ -1,938 +0,0 @@ -// -// Blackfriday Markdown Processor -// Available at http://github.com/russross/blackfriday -// -// Copyright © 2011 Russ Ross . -// Distributed under the Simplified BSD License. -// See README.md for details. -// - -// -// -// HTML rendering backend -// -// - -package blackfriday - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" -) - -// Html renderer configuration options. -const ( - HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks - HTML_SKIP_STYLE // skip embedded