From 03b629064a22ac0d6389a4b1d768a91deaffa0ba Mon Sep 17 00:00:00 2001 From: Caleb Case Date: Mon, 11 May 2020 14:57:46 -0400 Subject: [PATCH] Tardigrade Backend: Dependencies --- go.mod | 1 + go.sum | 56 +- vendor/github.com/btcsuite/btcutil/LICENSE | 16 + .../btcsuite/btcutil/base58/README.md | 34 + .../btcsuite/btcutil/base58/alphabet.go | 49 + .../btcsuite/btcutil/base58/base58.go | 75 + .../btcsuite/btcutil/base58/base58check.go | 52 + .../btcsuite/btcutil/base58/cov_report.sh | 17 + .../github.com/btcsuite/btcutil/base58/doc.go | 29 + .../calebcase/tmpfile/.golangci.yml | 45 + vendor/github.com/calebcase/tmpfile/LICENSE | 202 + .../calebcase/tmpfile/LICENSE.golang | 27 + vendor/github.com/calebcase/tmpfile/README.md | 30 + vendor/github.com/calebcase/tmpfile/doc.go | 9 + vendor/github.com/calebcase/tmpfile/go.mod | 5 + vendor/github.com/calebcase/tmpfile/go.sum | 2 + .../github.com/calebcase/tmpfile/tmpfile.go | 25 + .../calebcase/tmpfile/tmpfile_windows.go | 94 + vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 + vendor/github.com/gogo/protobuf/LICENSE | 35 + .../github.com/gogo/protobuf/proto/Makefile | 43 + .../github.com/gogo/protobuf/proto/clone.go | 258 + .../gogo/protobuf/proto/custom_gogo.go | 39 + .../github.com/gogo/protobuf/proto/decode.go | 427 ++ .../gogo/protobuf/proto/deprecated.go | 63 + .../github.com/gogo/protobuf/proto/discard.go | 350 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 49 + .../github.com/gogo/protobuf/proto/encode.go | 203 + .../gogo/protobuf/proto/encode_gogo.go | 33 + .../github.com/gogo/protobuf/proto/equal.go | 300 + .../gogo/protobuf/proto/extensions.go | 604 ++ .../gogo/protobuf/proto/extensions_gogo.go | 368 + vendor/github.com/gogo/protobuf/proto/lib.go | 967 +++ .../gogo/protobuf/proto/lib_gogo.go | 50 + .../gogo/protobuf/proto/message_set.go | 181 + .../gogo/protobuf/proto/pointer_reflect.go | 357 + 
.../protobuf/proto/pointer_reflect_gogo.go | 59 + .../gogo/protobuf/proto/pointer_unsafe.go | 308 + .../protobuf/proto/pointer_unsafe_gogo.go | 56 + .../gogo/protobuf/proto/properties.go | 599 ++ .../gogo/protobuf/proto/properties_gogo.go | 36 + .../gogo/protobuf/proto/skip_gogo.go | 119 + .../gogo/protobuf/proto/table_marshal.go | 3006 ++++++++ .../gogo/protobuf/proto/table_marshal_gogo.go | 388 + .../gogo/protobuf/proto/table_merge.go | 657 ++ .../gogo/protobuf/proto/table_unmarshal.go | 2245 ++++++ .../protobuf/proto/table_unmarshal_gogo.go | 385 + vendor/github.com/gogo/protobuf/proto/text.go | 928 +++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1018 +++ .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 49 + .../gogo/protobuf/proto/wrappers.go | 1888 +++++ .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../github.com/minio/sha256-simd/.gitignore | 1 + .../github.com/minio/sha256-simd/.travis.yml | 24 + vendor/github.com/minio/sha256-simd/LICENSE | 202 + vendor/github.com/minio/sha256-simd/README.md | 133 + .../github.com/minio/sha256-simd/appveyor.yml | 32 + vendor/github.com/minio/sha256-simd/cpuid.go | 119 + .../github.com/minio/sha256-simd/cpuid_386.go | 24 + .../github.com/minio/sha256-simd/cpuid_386.s | 53 + .../minio/sha256-simd/cpuid_amd64.go | 24 + .../minio/sha256-simd/cpuid_amd64.s | 53 + .../github.com/minio/sha256-simd/cpuid_arm.go | 32 + .../minio/sha256-simd/cpuid_linux_arm64.go | 49 + .../minio/sha256-simd/cpuid_other.go | 34 + .../minio/sha256-simd/cpuid_others_arm64.go | 35 + vendor/github.com/minio/sha256-simd/go.mod | 1 + vendor/github.com/minio/sha256-simd/sha256.go | 409 ++ .../sha256-simd/sha256blockAvx2_amd64.go | 22 + .../minio/sha256-simd/sha256blockAvx2_amd64.s | 1449 ++++ .../sha256-simd/sha256blockAvx512_amd64.asm | 686 ++ .../sha256-simd/sha256blockAvx512_amd64.go | 500 ++ .../sha256-simd/sha256blockAvx512_amd64.s | 267 + 
.../minio/sha256-simd/sha256blockAvx_amd64.go | 22 + .../minio/sha256-simd/sha256blockAvx_amd64.s | 408 ++ .../minio/sha256-simd/sha256blockSha_amd64.go | 6 + .../minio/sha256-simd/sha256blockSha_amd64.s | 266 + .../sha256-simd/sha256blockSsse_amd64.go | 22 + .../minio/sha256-simd/sha256blockSsse_amd64.s | 429 ++ .../minio/sha256-simd/sha256block_386.go | 25 + .../minio/sha256-simd/sha256block_amd64.go | 53 + .../minio/sha256-simd/sha256block_arm.go | 25 + .../minio/sha256-simd/sha256block_arm64.go | 37 + .../minio/sha256-simd/sha256block_arm64.s | 192 + .../minio/sha256-simd/sha256block_other.go | 25 + .../spacemonkeygo/errors/.travis.yml | 6 + .../github.com/spacemonkeygo/errors/LICENSE | 191 + .../github.com/spacemonkeygo/errors/README.md | 19 + .../github.com/spacemonkeygo/errors/config.go | 24 + .../github.com/spacemonkeygo/errors/ctx17.go | 30 + .../spacemonkeygo/errors/data_keys.go | 32 + vendor/github.com/spacemonkeygo/errors/doc.go | 205 + .../github.com/spacemonkeygo/errors/errors.go | 648 ++ .../spacemonkeygo/errors/syscall.go | 26 + .../spacemonkeygo/errors/syscall_ae.go | 21 + .../github.com/spacemonkeygo/errors/utils.go | 155 + .../github.com/spacemonkeygo/errors/xctx.go | 26 + .../spacemonkeygo/monkit/v3/AUTHORS | 1 + .../spacemonkeygo/monkit/v3/LICENSE | 191 + .../spacemonkeygo/monkit/v3/README.md | 637 ++ .../spacemonkeygo/monkit/v3/callers.go | 48 + .../spacemonkeygo/monkit/v3/cas_safe.go | 75 + .../spacemonkeygo/monkit/v3/cas_unsafe.go | 70 + .../spacemonkeygo/monkit/v3/counter.go | 129 + .../github.com/spacemonkeygo/monkit/v3/ctx.go | 363 + .../spacemonkeygo/monkit/v3/dist.go | 61 + .../spacemonkeygo/monkit/v3/distgen.go.m4 | 185 + .../github.com/spacemonkeygo/monkit/v3/doc.go | 579 ++ .../spacemonkeygo/monkit/v3/durdist.go | 185 + .../spacemonkeygo/monkit/v3/error_names.go | 114 + .../spacemonkeygo/monkit/v3/error_names_ae.go | 21 + .../monkit/v3/error_names_syscall.go | 24 + .../spacemonkeygo/monkit/v3/floatdist.go | 184 + 
.../spacemonkeygo/monkit/v3/func.go | 71 + .../spacemonkeygo/monkit/v3/funcset.go | 78 + .../spacemonkeygo/monkit/v3/funcstats.go | 219 + .../github.com/spacemonkeygo/monkit/v3/go.mod | 5 + .../github.com/spacemonkeygo/monkit/v3/go.sum | 5 + .../github.com/spacemonkeygo/monkit/v3/id.go | 48 + .../spacemonkeygo/monkit/v3/intdist.go | 184 + .../spacemonkeygo/monkit/v3/meter.go | 212 + .../monkit/v3/monotime/monotime.go | 7 + .../monkit/v3/monotime/monotime_fallback.go | 7 + .../monkit/v3/monotime/monotime_windows.go | 27 + .../spacemonkeygo/monkit/v3/registry.go | 256 + .../github.com/spacemonkeygo/monkit/v3/rng.go | 181 + .../spacemonkeygo/monkit/v3/scope.go | 301 + .../spacemonkeygo/monkit/v3/span.go | 146 + .../spacemonkeygo/monkit/v3/spanbag.go | 59 + .../spacemonkeygo/monkit/v3/spinlock.go | 35 + .../spacemonkeygo/monkit/v3/stats.go | 77 + .../spacemonkeygo/monkit/v3/struct.go | 59 + .../spacemonkeygo/monkit/v3/tags.go | 157 + .../spacemonkeygo/monkit/v3/task.go | 74 + .../spacemonkeygo/monkit/v3/timer.go | 96 + .../spacemonkeygo/monkit/v3/trace.go | 136 + .../github.com/spacemonkeygo/monkit/v3/val.go | 204 + .../github.com/vivint/infectious/.travis.yml | 14 + vendor/github.com/vivint/infectious/LICENSE | 67 + vendor/github.com/vivint/infectious/README.md | 133 + .../vivint/infectious/addmul_amd64.go | 58 + .../vivint/infectious/addmul_amd64.s | 199 + .../vivint/infectious/addmul_noasm.go | 40 + .../vivint/infectious/addmul_tables_amd64.go | 288 + .../vivint/infectious/berlekamp_welch.go | 245 + vendor/github.com/vivint/infectious/common.go | 48 + vendor/github.com/vivint/infectious/fec.go | 319 + vendor/github.com/vivint/infectious/gf_alg.go | 422 ++ vendor/github.com/vivint/infectious/math.go | 179 + vendor/github.com/vivint/infectious/tables.go | 154 + vendor/github.com/zeebo/errs/.gitignore | 1 + vendor/github.com/zeebo/errs/AUTHORS | 4 + vendor/github.com/zeebo/errs/LICENSE | 21 + vendor/github.com/zeebo/errs/README.md | 235 + 
vendor/github.com/zeebo/errs/errs.go | 296 + vendor/github.com/zeebo/errs/go.mod | 3 + vendor/github.com/zeebo/errs/group.go | 100 + vendor/go.uber.org/atomic/.codecov.yml | 15 + vendor/go.uber.org/atomic/.gitignore | 11 + vendor/go.uber.org/atomic/.travis.yml | 27 + vendor/go.uber.org/atomic/LICENSE.txt | 19 + vendor/go.uber.org/atomic/Makefile | 51 + vendor/go.uber.org/atomic/README.md | 36 + vendor/go.uber.org/atomic/atomic.go | 351 + vendor/go.uber.org/atomic/error.go | 55 + vendor/go.uber.org/atomic/glide.lock | 17 + vendor/go.uber.org/atomic/glide.yaml | 6 + vendor/go.uber.org/atomic/string.go | 49 + vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 1 + vendor/go.uber.org/multierr/.travis.yml | 33 + vendor/go.uber.org/multierr/CHANGELOG.md | 28 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 74 + vendor/go.uber.org/multierr/README.md | 23 + vendor/go.uber.org/multierr/error.go | 401 ++ vendor/go.uber.org/multierr/glide.lock | 19 + vendor/go.uber.org/multierr/glide.yaml | 8 + vendor/go.uber.org/zap/.codecov.yml | 17 + vendor/go.uber.org/zap/.gitignore | 28 + vendor/go.uber.org/zap/.readme.tmpl | 108 + vendor/go.uber.org/zap/.travis.yml | 21 + vendor/go.uber.org/zap/CHANGELOG.md | 327 + vendor/go.uber.org/zap/CODE_OF_CONDUCT.md | 75 + vendor/go.uber.org/zap/CONTRIBUTING.md | 81 + vendor/go.uber.org/zap/FAQ.md | 155 + vendor/go.uber.org/zap/LICENSE.txt | 19 + vendor/go.uber.org/zap/Makefile | 76 + vendor/go.uber.org/zap/README.md | 136 + vendor/go.uber.org/zap/array.go | 320 + vendor/go.uber.org/zap/buffer/buffer.go | 115 + vendor/go.uber.org/zap/buffer/pool.go | 49 + vendor/go.uber.org/zap/check_license.sh | 17 + vendor/go.uber.org/zap/config.go | 243 + vendor/go.uber.org/zap/doc.go | 113 + vendor/go.uber.org/zap/encoder.go | 75 + vendor/go.uber.org/zap/error.go | 80 + vendor/go.uber.org/zap/field.go | 310 + vendor/go.uber.org/zap/flag.go | 39 + vendor/go.uber.org/zap/glide.lock | 76 
+ vendor/go.uber.org/zap/glide.yaml | 35 + vendor/go.uber.org/zap/global.go | 168 + vendor/go.uber.org/zap/global_go112.go | 26 + vendor/go.uber.org/zap/global_prego112.go | 26 + vendor/go.uber.org/zap/http_handler.go | 81 + .../zap/internal/bufferpool/bufferpool.go | 31 + .../go.uber.org/zap/internal/color/color.go | 44 + vendor/go.uber.org/zap/internal/exit/exit.go | 64 + vendor/go.uber.org/zap/level.go | 132 + vendor/go.uber.org/zap/logger.go | 305 + vendor/go.uber.org/zap/options.go | 109 + vendor/go.uber.org/zap/sink.go | 161 + vendor/go.uber.org/zap/stacktrace.go | 126 + vendor/go.uber.org/zap/sugar.go | 304 + vendor/go.uber.org/zap/time.go | 27 + vendor/go.uber.org/zap/writer.go | 99 + .../zap/zapcore/console_encoder.go | 147 + vendor/go.uber.org/zap/zapcore/core.go | 113 + vendor/go.uber.org/zap/zapcore/doc.go | 24 + vendor/go.uber.org/zap/zapcore/encoder.go | 348 + vendor/go.uber.org/zap/zapcore/entry.go | 257 + vendor/go.uber.org/zap/zapcore/error.go | 120 + vendor/go.uber.org/zap/zapcore/field.go | 212 + vendor/go.uber.org/zap/zapcore/hook.go | 68 + .../go.uber.org/zap/zapcore/json_encoder.go | 505 ++ vendor/go.uber.org/zap/zapcore/level.go | 175 + .../go.uber.org/zap/zapcore/level_strings.go | 46 + vendor/go.uber.org/zap/zapcore/marshaler.go | 53 + .../go.uber.org/zap/zapcore/memory_encoder.go | 179 + vendor/go.uber.org/zap/zapcore/sampler.go | 134 + vendor/go.uber.org/zap/zapcore/tee.go | 81 + .../go.uber.org/zap/zapcore/write_syncer.go | 123 + vendor/golang.org/x/crypto/argon2/argon2.go | 285 + vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 60 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 243 + .../x/crypto/argon2/blamka_generic.go | 163 + .../golang.org/x/crypto/argon2/blamka_ref.go | 15 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 289 + .../x/crypto/blake2b/blake2bAVX2_amd64.go | 37 + .../x/crypto/blake2b/blake2bAVX2_amd64.s | 750 ++ .../x/crypto/blake2b/blake2b_amd64.go | 24 + 
.../x/crypto/blake2b/blake2b_amd64.s | 281 + .../x/crypto/blake2b/blake2b_generic.go | 182 + .../x/crypto/blake2b/blake2b_ref.go | 11 + vendor/golang.org/x/crypto/blake2b/blake2x.go | 177 + .../golang.org/x/crypto/blake2b/register.go | 32 + .../golang.org/x/sync/semaphore/semaphore.go | 127 + vendor/modules.txt | 82 + vendor/storj.io/common/LICENSE | 202 + vendor/storj.io/common/encryption/aesgcm.go | 165 + vendor/storj.io/common/encryption/bits.go | 30 + vendor/storj.io/common/encryption/common.go | 18 + .../storj.io/common/encryption/encryption.go | 156 + vendor/storj.io/common/encryption/pad.go | 89 + vendor/storj.io/common/encryption/password.go | 47 + vendor/storj.io/common/encryption/path.go | 510 ++ .../storj.io/common/encryption/secretbox.go | 119 + vendor/storj.io/common/encryption/store.go | 313 + .../storj.io/common/encryption/transform.go | 188 + vendor/storj.io/common/errs2/collect.go | 40 + vendor/storj.io/common/errs2/doc.go | 5 + vendor/storj.io/common/errs2/group.go | 38 + vendor/storj.io/common/errs2/ignore.go | 30 + vendor/storj.io/common/errs2/rpc.go | 17 + vendor/storj.io/common/errs2/sanatizer.go | 52 + vendor/storj.io/common/fpath/atomic.go | 42 + vendor/storj.io/common/fpath/doc.go | 5 + vendor/storj.io/common/fpath/editor.go | 62 + vendor/storj.io/common/fpath/os.go | 133 + vendor/storj.io/common/fpath/path.go | 148 + vendor/storj.io/common/fpath/temp_data.go | 38 + .../common/identity/certificate_authority.go | 501 ++ vendor/storj.io/common/identity/common.go | 16 + vendor/storj.io/common/identity/doc.go | 5 + vendor/storj.io/common/identity/generate.go | 89 + vendor/storj.io/common/identity/identity.go | 547 ++ vendor/storj.io/common/identity/utils.go | 108 + .../storj.io/common/internal/grpchook/hook.go | 51 + vendor/storj.io/common/macaroon/apikey.go | 294 + vendor/storj.io/common/macaroon/caveat.go | 15 + vendor/storj.io/common/macaroon/doc.go | 5 + vendor/storj.io/common/macaroon/macaroon.go | 128 + 
vendor/storj.io/common/macaroon/serialize.go | 215 + vendor/storj.io/common/macaroon/types.pb.go | 203 + vendor/storj.io/common/macaroon/types.proto | 33 + vendor/storj.io/common/memory/doc.go | 5 + vendor/storj.io/common/memory/size.go | 247 + vendor/storj.io/common/memory/sizes.go | 42 + vendor/storj.io/common/memory/string.go | 16 + vendor/storj.io/common/netutil/common.go | 8 + .../storj.io/common/netutil/timeout_linux.go | 56 + .../storj.io/common/netutil/timeout_other.go | 16 + vendor/storj.io/common/netutil/tracking.go | 36 + vendor/storj.io/common/paths/doc.go | 6 + vendor/storj.io/common/paths/path.go | 153 + vendor/storj.io/common/pb/alias.go | 12 + vendor/storj.io/common/pb/certificate.pb.go | 206 + vendor/storj.io/common/pb/certificate.proto | 20 + vendor/storj.io/common/pb/contact.pb.go | 483 ++ vendor/storj.io/common/pb/contact.proto | 42 + vendor/storj.io/common/pb/datarepair.pb.go | 103 + vendor/storj.io/common/pb/datarepair.proto | 16 + vendor/storj.io/common/pb/doc.go | 7 + vendor/storj.io/common/pb/encryption.pb.go | 125 + vendor/storj.io/common/pb/encryption.proto | 19 + .../common/pb/encryption_access.pb.go | 195 + .../common/pb/encryption_access.proto | 27 + vendor/storj.io/common/pb/gogo.proto | 143 + vendor/storj.io/common/pb/gracefulexit.pb.go | 1406 ++++ vendor/storj.io/common/pb/gracefulexit.proto | 137 + vendor/storj.io/common/pb/heldamount.pb.go | 483 ++ vendor/storj.io/common/pb/heldamount.proto | 49 + vendor/storj.io/common/pb/inspector.pb.go | 1348 ++++ vendor/storj.io/common/pb/inspector.proto | 132 + vendor/storj.io/common/pb/meta.pb.go | 93 + vendor/storj.io/common/pb/meta.proto | 13 + vendor/storj.io/common/pb/metainfo.pb.go | 6409 +++++++++++++++++ vendor/storj.io/common/pb/metainfo.proto | 632 ++ vendor/storj.io/common/pb/node.pb.go | 489 ++ vendor/storj.io/common/pb/node.proto | 73 + vendor/storj.io/common/pb/nodestats.pb.go | 628 ++ vendor/storj.io/common/pb/nodestats.proto | 58 + vendor/storj.io/common/pb/orders.pb.go | 821 
+++ vendor/storj.io/common/pb/orders.proto | 168 + vendor/storj.io/common/pb/overlay.pb.go | 231 + vendor/storj.io/common/pb/overlay.proto | 34 + vendor/storj.io/common/pb/payments.pb.go | 593 ++ vendor/storj.io/common/pb/payments.proto | 36 + vendor/storj.io/common/pb/piecestore2.pb.go | 1165 +++ vendor/storj.io/common/pb/piecestore2.proto | 132 + vendor/storj.io/common/pb/pointerdb.pb.go | 534 ++ vendor/storj.io/common/pb/pointerdb.proto | 74 + .../storj.io/common/pb/referralmanager.pb.go | 326 + .../storj.io/common/pb/referralmanager.proto | 31 + vendor/storj.io/common/pb/scannerValuer.go | 39 + vendor/storj.io/common/pb/scope.pb.go | 98 + vendor/storj.io/common/pb/scope.proto | 18 + vendor/storj.io/common/pb/streams.pb.go | 234 + vendor/storj.io/common/pb/streams.proto | 27 + vendor/storj.io/common/pb/types.go | 36 + vendor/storj.io/common/pb/utils.go | 88 + vendor/storj.io/common/pb/vouchers.pb.go | 290 + vendor/storj.io/common/pb/vouchers.proto | 39 + vendor/storj.io/common/peertls/doc.go | 5 + .../storj.io/common/peertls/extensions/doc.go | 5 + .../common/peertls/extensions/extensions.go | 187 + .../common/peertls/extensions/fuzz.go | 29 + .../storj.io/common/peertls/extensions/gob.go | 231 + .../common/peertls/extensions/revocations.go | 177 + vendor/storj.io/common/peertls/peertls.go | 169 + vendor/storj.io/common/peertls/templates.go | 46 + .../storj.io/common/peertls/tlsopts/cert.go | 18 + .../storj.io/common/peertls/tlsopts/config.go | 17 + vendor/storj.io/common/peertls/tlsopts/doc.go | 5 + .../common/peertls/tlsopts/options.go | 194 + vendor/storj.io/common/peertls/tlsopts/tls.go | 109 + vendor/storj.io/common/peertls/utils.go | 96 + vendor/storj.io/common/pkcrypto/common.go | 38 + vendor/storj.io/common/pkcrypto/doc.go | 14 + vendor/storj.io/common/pkcrypto/encoding.go | 236 + vendor/storj.io/common/pkcrypto/hashing.go | 21 + vendor/storj.io/common/pkcrypto/signing.go | 172 + vendor/storj.io/common/ranger/common.go | 14 + 
vendor/storj.io/common/ranger/doc.go | 5 + vendor/storj.io/common/ranger/file.go | 81 + vendor/storj.io/common/ranger/reader.go | 117 + vendor/storj.io/common/ranger/readerat.go | 62 + vendor/storj.io/common/readcloser/doc.go | 5 + vendor/storj.io/common/readcloser/fatal.go | 23 + vendor/storj.io/common/readcloser/lazy.go | 35 + vendor/storj.io/common/readcloser/limit.go | 25 + vendor/storj.io/common/readcloser/multi.go | 71 + vendor/storj.io/common/rpc/common.go | 72 + vendor/storj.io/common/rpc/conn.go | 33 + vendor/storj.io/common/rpc/dial.go | 367 + vendor/storj.io/common/rpc/doc.go | 5 + vendor/storj.io/common/rpc/known_ids.go | 57 + vendor/storj.io/common/rpc/lookup.go | 48 + vendor/storj.io/common/rpc/rpcpeer/peer.go | 70 + vendor/storj.io/common/rpc/rpcpool/pool.go | 210 + .../storj.io/common/rpc/rpcstatus/status.go | 118 + .../storj.io/common/rpc/rpctracing/common.go | 18 + .../storj.io/common/rpc/rpctracing/handler.go | 57 + .../storj.io/common/rpc/rpctracing/tracing.go | 58 + vendor/storj.io/common/signing/doc.go | 5 + vendor/storj.io/common/signing/encode.go | 120 + vendor/storj.io/common/signing/peers.go | 74 + vendor/storj.io/common/signing/sign.go | 165 + vendor/storj.io/common/signing/verify.go | 117 + vendor/storj.io/common/storj/bucket.go | 36 + vendor/storj.io/common/storj/doc.go | 7 + vendor/storj.io/common/storj/encryption.go | 163 + .../storj.io/common/storj/identity_version.go | 140 + vendor/storj.io/common/storj/metainfo.go | 79 + vendor/storj.io/common/storj/node.go | 260 + vendor/storj.io/common/storj/nodeurl.go | 137 + vendor/storj.io/common/storj/object.go | 108 + .../storj.io/common/storj/object_list_item.go | 21 + vendor/storj.io/common/storj/path.go | 21 + vendor/storj.io/common/storj/pieceid.go | 140 + vendor/storj.io/common/storj/piecekey.go | 159 + vendor/storj.io/common/storj/redundancy.go | 69 + vendor/storj.io/common/storj/segment.go | 32 + vendor/storj.io/common/storj/segmentid.go | 81 + 
vendor/storj.io/common/storj/serialnumber.go | 117 + vendor/storj.io/common/storj/streamid.go | 101 + vendor/storj.io/common/sync2/cooldown.go | 139 + vendor/storj.io/common/sync2/copy.go | 32 + vendor/storj.io/common/sync2/cycle.go | 224 + vendor/storj.io/common/sync2/doc.go | 12 + vendor/storj.io/common/sync2/fence.go | 67 + vendor/storj.io/common/sync2/io.go | 81 + vendor/storj.io/common/sync2/limiter.go | 52 + vendor/storj.io/common/sync2/nocopy.go | 15 + .../common/sync2/parent_child_limiter.go | 59 + vendor/storj.io/common/sync2/pipe.go | 280 + vendor/storj.io/common/sync2/semaphore.go | 47 + vendor/storj.io/common/sync2/sleep.go | 22 + .../common/sync2/success_threshold.go | 109 + vendor/storj.io/common/sync2/tee.go | 204 + vendor/storj.io/common/sync2/throttle.go | 130 + vendor/storj.io/common/sync2/workgroup.go | 88 + vendor/storj.io/common/uuid/db.go | 62 + vendor/storj.io/common/uuid/fuzz.go | 28 + vendor/storj.io/common/uuid/uuid.go | 141 + vendor/storj.io/drpc/.gitignore | 3 + vendor/storj.io/drpc/.golangci.yml | 129 + vendor/storj.io/drpc/Dockerfile.jenkins | 11 + vendor/storj.io/drpc/Jenkinsfile | 27 + vendor/storj.io/drpc/LICENSE | 201 + vendor/storj.io/drpc/Makefile | 33 + vendor/storj.io/drpc/README.md | 134 + vendor/storj.io/drpc/doc.go | 5 + vendor/storj.io/drpc/drpc.go | 95 + vendor/storj.io/drpc/drpcconn/README.md | 79 + vendor/storj.io/drpc/drpcconn/conn.go | 156 + vendor/storj.io/drpc/drpcconn/doc.go | 9 + vendor/storj.io/drpc/drpcctx/README.md | 60 + vendor/storj.io/drpc/drpcctx/doc.go | 5 + vendor/storj.io/drpc/drpcctx/transport.go | 59 + vendor/storj.io/drpc/drpcdebug/README.md | 14 + vendor/storj.io/drpc/drpcdebug/doc.go | 5 + .../storj.io/drpc/drpcdebug/log_disabled.go | 9 + vendor/storj.io/drpc/drpcdebug/log_enabled.go | 18 + vendor/storj.io/drpc/drpcerr/README.md | 22 + vendor/storj.io/drpc/drpcerr/doc.go | 5 + vendor/storj.io/drpc/drpcerr/err.go | 40 + vendor/storj.io/drpc/drpcmanager/README.md | 79 + 
vendor/storj.io/drpc/drpcmanager/doc.go | 9 + vendor/storj.io/drpc/drpcmanager/manager.go | 335 + vendor/storj.io/drpc/drpcmetadata/README.md | 37 + vendor/storj.io/drpc/drpcmetadata/doc.go | 5 + .../drpc/drpcmetadata/invoke/README.md | 80 + .../storj.io/drpc/drpcmetadata/invoke/doc.go | 8 + .../drpc/drpcmetadata/invoke/metadata.pb.go | 79 + .../drpc/drpcmetadata/invoke/metadata.proto | 11 + vendor/storj.io/drpc/drpcmetadata/metadata.go | 79 + vendor/storj.io/drpc/drpcmux/README.md | 39 + vendor/storj.io/drpc/drpcmux/doc.go | 5 + vendor/storj.io/drpc/drpcmux/mux.go | 116 + vendor/storj.io/drpc/drpcsignal/README.md | 56 + vendor/storj.io/drpc/drpcsignal/doc.go | 5 + vendor/storj.io/drpc/drpcsignal/signal.go | 75 + vendor/storj.io/drpc/drpcstream/README.md | 147 + vendor/storj.io/drpc/drpcstream/chmutex.go | 38 + vendor/storj.io/drpc/drpcstream/doc.go | 9 + vendor/storj.io/drpc/drpcstream/stream.go | 424 ++ vendor/storj.io/drpc/drpcwire/README.md | 253 + vendor/storj.io/drpc/drpcwire/doc.go | 9 + vendor/storj.io/drpc/drpcwire/error.go | 27 + vendor/storj.io/drpc/drpcwire/packet.go | 160 + .../storj.io/drpc/drpcwire/packet_string.go | 30 + vendor/storj.io/drpc/drpcwire/split.go | 36 + vendor/storj.io/drpc/drpcwire/transport.go | 158 + vendor/storj.io/drpc/drpcwire/varint.go | 33 + vendor/storj.io/drpc/go.mod | 10 + vendor/storj.io/drpc/go.sum | 20 + vendor/storj.io/drpc/staticcheck.conf | 1 + vendor/storj.io/uplink/.clabot | 63 + vendor/storj.io/uplink/.gitignore | 37 + vendor/storj.io/uplink/CODE_OF_CONDUCT.md | 53 + vendor/storj.io/uplink/Jenkinsfile | 133 + vendor/storj.io/uplink/LICENSE | 202 + vendor/storj.io/uplink/MAINTAINERS.md | 50 + vendor/storj.io/uplink/Makefile | 7 + vendor/storj.io/uplink/README.md | 48 + vendor/storj.io/uplink/access.go | 314 + vendor/storj.io/uplink/bucket.go | 137 + vendor/storj.io/uplink/buckets.go | 128 + vendor/storj.io/uplink/common.go | 56 + vendor/storj.io/uplink/config.go | 70 + vendor/storj.io/uplink/doc.go | 120 + 
vendor/storj.io/uplink/download.go | 81 + vendor/storj.io/uplink/encryption.go | 116 + vendor/storj.io/uplink/go.mod | 15 + vendor/storj.io/uplink/go.sum | 154 + .../uplink/internal/expose/exposed.go | 14 + .../uplink/internal/telemetryclient/client.go | 42 + vendor/storj.io/uplink/object.go | 131 + vendor/storj.io/uplink/objects.go | 169 + .../uplink/private/ecclient/client.go | 418 ++ .../uplink/private/ecclient/common.go | 11 + .../uplink/private/eestream/common.go | 11 + .../uplink/private/eestream/decode.go | 230 + .../uplink/private/eestream/encode.go | 318 + .../uplink/private/eestream/piecebuf.go | 300 + vendor/storj.io/uplink/private/eestream/rs.go | 53 + .../uplink/private/eestream/stripe.go | 182 + .../uplink/private/eestream/unsafe_rs.go | 62 + .../storj.io/uplink/private/metainfo/batch.go | 149 + .../uplink/private/metainfo/client.go | 1199 +++ .../uplink/private/metainfo/client_old.go | 175 + .../private/metainfo/kvmetainfo/buckets.go | 128 + .../private/metainfo/kvmetainfo/interface.go | 83 + .../private/metainfo/kvmetainfo/metainfo.go | 66 + .../private/metainfo/kvmetainfo/objects.go | 628 ++ .../private/metainfo/kvmetainfo/paths.go | 29 + .../private/metainfo/kvmetainfo/project.go | 27 + .../private/metainfo/kvmetainfo/stream.go | 155 + .../private/metainfo/kvmetainfo/temputils.go | 39 + .../uplink/private/piecestore/buffering.go | 132 + .../uplink/private/piecestore/client.go | 121 + .../uplink/private/piecestore/download.go | 308 + .../uplink/private/piecestore/upload.go | 261 + .../uplink/private/piecestore/verification.go | 55 + .../uplink/private/storage/segments/common.go | 11 + .../uplink/private/storage/segments/peek.go | 57 + .../uplink/private/storage/segments/size.go | 29 + .../uplink/private/storage/segments/store.go | 131 + .../uplink/private/storage/streams/eof.go | 36 + .../uplink/private/storage/streams/path.go | 63 + .../uplink/private/storage/streams/shim.go | 62 + .../uplink/private/storage/streams/size.go | 28 + 
.../uplink/private/storage/streams/store.go | 585 ++ .../storj.io/uplink/private/stream/common.go | 11 + .../uplink/private/stream/download.go | 118 + .../storj.io/uplink/private/stream/upload.go | 95 + vendor/storj.io/uplink/project.go | 152 + vendor/storj.io/uplink/upload.go | 166 + 544 files changed, 86690 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/btcsuite/btcutil/LICENSE create mode 100644 vendor/github.com/btcsuite/btcutil/base58/README.md create mode 100644 vendor/github.com/btcsuite/btcutil/base58/alphabet.go create mode 100644 vendor/github.com/btcsuite/btcutil/base58/base58.go create mode 100644 vendor/github.com/btcsuite/btcutil/base58/base58check.go create mode 100644 vendor/github.com/btcsuite/btcutil/base58/cov_report.sh create mode 100644 vendor/github.com/btcsuite/btcutil/base58/doc.go create mode 100644 vendor/github.com/calebcase/tmpfile/.golangci.yml create mode 100644 vendor/github.com/calebcase/tmpfile/LICENSE create mode 100644 vendor/github.com/calebcase/tmpfile/LICENSE.golang create mode 100644 vendor/github.com/calebcase/tmpfile/README.md create mode 100644 vendor/github.com/calebcase/tmpfile/doc.go create mode 100644 vendor/github.com/calebcase/tmpfile/go.mod create mode 100644 vendor/github.com/calebcase/tmpfile/go.sum create mode 100644 vendor/github.com/calebcase/tmpfile/tmpfile.go create mode 100644 vendor/github.com/calebcase/tmpfile/tmpfile_windows.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/minio/sha256-simd/.gitignore create mode 100644 vendor/github.com/minio/sha256-simd/.travis.yml create mode 100644 vendor/github.com/minio/sha256-simd/LICENSE create mode 100644 vendor/github.com/minio/sha256-simd/README.md create mode 100644 vendor/github.com/minio/sha256-simd/appveyor.yml create mode 100644 vendor/github.com/minio/sha256-simd/cpuid.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_386.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_386.s create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_arm.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_other.go create mode 100644 vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go create mode 100644 vendor/github.com/minio/sha256-simd/go.mod create mode 100644 vendor/github.com/minio/sha256-simd/sha256.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go create mode 100644 
vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_386.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_amd64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.go create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_arm64.s create mode 100644 vendor/github.com/minio/sha256-simd/sha256block_other.go create mode 100644 vendor/github.com/spacemonkeygo/errors/.travis.yml create mode 100644 vendor/github.com/spacemonkeygo/errors/LICENSE create mode 100644 vendor/github.com/spacemonkeygo/errors/README.md create mode 100644 vendor/github.com/spacemonkeygo/errors/config.go create mode 100644 vendor/github.com/spacemonkeygo/errors/ctx17.go create mode 100644 vendor/github.com/spacemonkeygo/errors/data_keys.go create mode 100644 vendor/github.com/spacemonkeygo/errors/doc.go create mode 100644 vendor/github.com/spacemonkeygo/errors/errors.go create mode 100644 vendor/github.com/spacemonkeygo/errors/syscall.go create mode 100644 vendor/github.com/spacemonkeygo/errors/syscall_ae.go create mode 100644 vendor/github.com/spacemonkeygo/errors/utils.go create mode 100644 vendor/github.com/spacemonkeygo/errors/xctx.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/AUTHORS create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/LICENSE create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/README.md create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/callers.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/cas_safe.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/cas_unsafe.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/counter.go create mode 
100644 vendor/github.com/spacemonkeygo/monkit/v3/ctx.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/dist.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/distgen.go.m4 create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/doc.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/durdist.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/error_names.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/error_names_ae.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/error_names_syscall.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/floatdist.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/func.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/funcset.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/funcstats.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/go.mod create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/go.sum create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/id.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/intdist.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/meter.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_fallback.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_windows.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/registry.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/rng.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/scope.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/span.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/spanbag.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/spinlock.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/stats.go create mode 100644 
vendor/github.com/spacemonkeygo/monkit/v3/struct.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/tags.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/task.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/timer.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/trace.go create mode 100644 vendor/github.com/spacemonkeygo/monkit/v3/val.go create mode 100644 vendor/github.com/vivint/infectious/.travis.yml create mode 100644 vendor/github.com/vivint/infectious/LICENSE create mode 100644 vendor/github.com/vivint/infectious/README.md create mode 100644 vendor/github.com/vivint/infectious/addmul_amd64.go create mode 100644 vendor/github.com/vivint/infectious/addmul_amd64.s create mode 100644 vendor/github.com/vivint/infectious/addmul_noasm.go create mode 100644 vendor/github.com/vivint/infectious/addmul_tables_amd64.go create mode 100644 vendor/github.com/vivint/infectious/berlekamp_welch.go create mode 100644 vendor/github.com/vivint/infectious/common.go create mode 100644 vendor/github.com/vivint/infectious/fec.go create mode 100644 vendor/github.com/vivint/infectious/gf_alg.go create mode 100644 vendor/github.com/vivint/infectious/math.go create mode 100644 vendor/github.com/vivint/infectious/tables.go create mode 100644 vendor/github.com/zeebo/errs/.gitignore create mode 100644 vendor/github.com/zeebo/errs/AUTHORS create mode 100644 vendor/github.com/zeebo/errs/LICENSE create mode 100644 vendor/github.com/zeebo/errs/README.md create mode 100644 vendor/github.com/zeebo/errs/errs.go create mode 100644 vendor/github.com/zeebo/errs/go.mod create mode 100644 vendor/github.com/zeebo/errs/group.go create mode 100644 vendor/go.uber.org/atomic/.codecov.yml create mode 100644 vendor/go.uber.org/atomic/.gitignore create mode 100644 vendor/go.uber.org/atomic/.travis.yml create mode 100644 vendor/go.uber.org/atomic/LICENSE.txt create mode 100644 vendor/go.uber.org/atomic/Makefile create mode 100644 
vendor/go.uber.org/atomic/README.md create mode 100644 vendor/go.uber.org/atomic/atomic.go create mode 100644 vendor/go.uber.org/atomic/error.go create mode 100644 vendor/go.uber.org/atomic/glide.lock create mode 100644 vendor/go.uber.org/atomic/glide.yaml create mode 100644 vendor/go.uber.org/atomic/string.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/.travis.yml create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/glide.lock create mode 100644 vendor/go.uber.org/multierr/glide.yaml create mode 100644 vendor/go.uber.org/zap/.codecov.yml create mode 100644 vendor/go.uber.org/zap/.gitignore create mode 100644 vendor/go.uber.org/zap/.readme.tmpl create mode 100644 vendor/go.uber.org/zap/.travis.yml create mode 100644 vendor/go.uber.org/zap/CHANGELOG.md create mode 100644 vendor/go.uber.org/zap/CODE_OF_CONDUCT.md create mode 100644 vendor/go.uber.org/zap/CONTRIBUTING.md create mode 100644 vendor/go.uber.org/zap/FAQ.md create mode 100644 vendor/go.uber.org/zap/LICENSE.txt create mode 100644 vendor/go.uber.org/zap/Makefile create mode 100644 vendor/go.uber.org/zap/README.md create mode 100644 vendor/go.uber.org/zap/array.go create mode 100644 vendor/go.uber.org/zap/buffer/buffer.go create mode 100644 vendor/go.uber.org/zap/buffer/pool.go create mode 100644 vendor/go.uber.org/zap/check_license.sh create mode 100644 vendor/go.uber.org/zap/config.go create mode 100644 vendor/go.uber.org/zap/doc.go create mode 100644 vendor/go.uber.org/zap/encoder.go create mode 100644 vendor/go.uber.org/zap/error.go create mode 100644 vendor/go.uber.org/zap/field.go create mode 100644 
vendor/go.uber.org/zap/flag.go create mode 100644 vendor/go.uber.org/zap/glide.lock create mode 100644 vendor/go.uber.org/zap/glide.yaml create mode 100644 vendor/go.uber.org/zap/global.go create mode 100644 vendor/go.uber.org/zap/global_go112.go create mode 100644 vendor/go.uber.org/zap/global_prego112.go create mode 100644 vendor/go.uber.org/zap/http_handler.go create mode 100644 vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go create mode 100644 vendor/go.uber.org/zap/internal/color/color.go create mode 100644 vendor/go.uber.org/zap/internal/exit/exit.go create mode 100644 vendor/go.uber.org/zap/level.go create mode 100644 vendor/go.uber.org/zap/logger.go create mode 100644 vendor/go.uber.org/zap/options.go create mode 100644 vendor/go.uber.org/zap/sink.go create mode 100644 vendor/go.uber.org/zap/stacktrace.go create mode 100644 vendor/go.uber.org/zap/sugar.go create mode 100644 vendor/go.uber.org/zap/time.go create mode 100644 vendor/go.uber.org/zap/writer.go create mode 100644 vendor/go.uber.org/zap/zapcore/console_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/core.go create mode 100644 vendor/go.uber.org/zap/zapcore/doc.go create mode 100644 vendor/go.uber.org/zap/zapcore/encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/entry.go create mode 100644 vendor/go.uber.org/zap/zapcore/error.go create mode 100644 vendor/go.uber.org/zap/zapcore/field.go create mode 100644 vendor/go.uber.org/zap/zapcore/hook.go create mode 100644 vendor/go.uber.org/zap/zapcore/json_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/level.go create mode 100644 vendor/go.uber.org/zap/zapcore/level_strings.go create mode 100644 vendor/go.uber.org/zap/zapcore/marshaler.go create mode 100644 vendor/go.uber.org/zap/zapcore/memory_encoder.go create mode 100644 vendor/go.uber.org/zap/zapcore/sampler.go create mode 100644 vendor/go.uber.org/zap/zapcore/tee.go create mode 100644 vendor/go.uber.org/zap/zapcore/write_syncer.go create mode 100644 
vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_generic.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2b_ref.go create mode 100644 vendor/golang.org/x/crypto/blake2b/blake2x.go create mode 100644 vendor/golang.org/x/crypto/blake2b/register.go create mode 100644 vendor/golang.org/x/sync/semaphore/semaphore.go create mode 100644 vendor/storj.io/common/LICENSE create mode 100644 vendor/storj.io/common/encryption/aesgcm.go create mode 100644 vendor/storj.io/common/encryption/bits.go create mode 100644 vendor/storj.io/common/encryption/common.go create mode 100644 vendor/storj.io/common/encryption/encryption.go create mode 100644 vendor/storj.io/common/encryption/pad.go create mode 100644 vendor/storj.io/common/encryption/password.go create mode 100644 vendor/storj.io/common/encryption/path.go create mode 100644 vendor/storj.io/common/encryption/secretbox.go create mode 100644 vendor/storj.io/common/encryption/store.go create mode 100644 vendor/storj.io/common/encryption/transform.go create mode 100644 vendor/storj.io/common/errs2/collect.go create mode 100644 vendor/storj.io/common/errs2/doc.go create mode 100644 vendor/storj.io/common/errs2/group.go create mode 100644 vendor/storj.io/common/errs2/ignore.go create 
mode 100644 vendor/storj.io/common/errs2/rpc.go create mode 100644 vendor/storj.io/common/errs2/sanatizer.go create mode 100644 vendor/storj.io/common/fpath/atomic.go create mode 100644 vendor/storj.io/common/fpath/doc.go create mode 100644 vendor/storj.io/common/fpath/editor.go create mode 100644 vendor/storj.io/common/fpath/os.go create mode 100644 vendor/storj.io/common/fpath/path.go create mode 100644 vendor/storj.io/common/fpath/temp_data.go create mode 100644 vendor/storj.io/common/identity/certificate_authority.go create mode 100644 vendor/storj.io/common/identity/common.go create mode 100644 vendor/storj.io/common/identity/doc.go create mode 100644 vendor/storj.io/common/identity/generate.go create mode 100644 vendor/storj.io/common/identity/identity.go create mode 100644 vendor/storj.io/common/identity/utils.go create mode 100644 vendor/storj.io/common/internal/grpchook/hook.go create mode 100644 vendor/storj.io/common/macaroon/apikey.go create mode 100644 vendor/storj.io/common/macaroon/caveat.go create mode 100644 vendor/storj.io/common/macaroon/doc.go create mode 100644 vendor/storj.io/common/macaroon/macaroon.go create mode 100644 vendor/storj.io/common/macaroon/serialize.go create mode 100644 vendor/storj.io/common/macaroon/types.pb.go create mode 100644 vendor/storj.io/common/macaroon/types.proto create mode 100644 vendor/storj.io/common/memory/doc.go create mode 100644 vendor/storj.io/common/memory/size.go create mode 100644 vendor/storj.io/common/memory/sizes.go create mode 100644 vendor/storj.io/common/memory/string.go create mode 100644 vendor/storj.io/common/netutil/common.go create mode 100644 vendor/storj.io/common/netutil/timeout_linux.go create mode 100644 vendor/storj.io/common/netutil/timeout_other.go create mode 100644 vendor/storj.io/common/netutil/tracking.go create mode 100644 vendor/storj.io/common/paths/doc.go create mode 100644 vendor/storj.io/common/paths/path.go create mode 100644 vendor/storj.io/common/pb/alias.go create mode 
100644 vendor/storj.io/common/pb/certificate.pb.go create mode 100644 vendor/storj.io/common/pb/certificate.proto create mode 100644 vendor/storj.io/common/pb/contact.pb.go create mode 100644 vendor/storj.io/common/pb/contact.proto create mode 100644 vendor/storj.io/common/pb/datarepair.pb.go create mode 100644 vendor/storj.io/common/pb/datarepair.proto create mode 100644 vendor/storj.io/common/pb/doc.go create mode 100644 vendor/storj.io/common/pb/encryption.pb.go create mode 100644 vendor/storj.io/common/pb/encryption.proto create mode 100644 vendor/storj.io/common/pb/encryption_access.pb.go create mode 100644 vendor/storj.io/common/pb/encryption_access.proto create mode 100644 vendor/storj.io/common/pb/gogo.proto create mode 100644 vendor/storj.io/common/pb/gracefulexit.pb.go create mode 100644 vendor/storj.io/common/pb/gracefulexit.proto create mode 100644 vendor/storj.io/common/pb/heldamount.pb.go create mode 100644 vendor/storj.io/common/pb/heldamount.proto create mode 100644 vendor/storj.io/common/pb/inspector.pb.go create mode 100644 vendor/storj.io/common/pb/inspector.proto create mode 100644 vendor/storj.io/common/pb/meta.pb.go create mode 100644 vendor/storj.io/common/pb/meta.proto create mode 100644 vendor/storj.io/common/pb/metainfo.pb.go create mode 100644 vendor/storj.io/common/pb/metainfo.proto create mode 100644 vendor/storj.io/common/pb/node.pb.go create mode 100644 vendor/storj.io/common/pb/node.proto create mode 100644 vendor/storj.io/common/pb/nodestats.pb.go create mode 100644 vendor/storj.io/common/pb/nodestats.proto create mode 100644 vendor/storj.io/common/pb/orders.pb.go create mode 100644 vendor/storj.io/common/pb/orders.proto create mode 100644 vendor/storj.io/common/pb/overlay.pb.go create mode 100644 vendor/storj.io/common/pb/overlay.proto create mode 100644 vendor/storj.io/common/pb/payments.pb.go create mode 100644 vendor/storj.io/common/pb/payments.proto create mode 100644 vendor/storj.io/common/pb/piecestore2.pb.go create mode 
100644 vendor/storj.io/common/pb/piecestore2.proto create mode 100644 vendor/storj.io/common/pb/pointerdb.pb.go create mode 100644 vendor/storj.io/common/pb/pointerdb.proto create mode 100644 vendor/storj.io/common/pb/referralmanager.pb.go create mode 100644 vendor/storj.io/common/pb/referralmanager.proto create mode 100644 vendor/storj.io/common/pb/scannerValuer.go create mode 100644 vendor/storj.io/common/pb/scope.pb.go create mode 100644 vendor/storj.io/common/pb/scope.proto create mode 100644 vendor/storj.io/common/pb/streams.pb.go create mode 100644 vendor/storj.io/common/pb/streams.proto create mode 100644 vendor/storj.io/common/pb/types.go create mode 100644 vendor/storj.io/common/pb/utils.go create mode 100644 vendor/storj.io/common/pb/vouchers.pb.go create mode 100644 vendor/storj.io/common/pb/vouchers.proto create mode 100644 vendor/storj.io/common/peertls/doc.go create mode 100644 vendor/storj.io/common/peertls/extensions/doc.go create mode 100644 vendor/storj.io/common/peertls/extensions/extensions.go create mode 100644 vendor/storj.io/common/peertls/extensions/fuzz.go create mode 100644 vendor/storj.io/common/peertls/extensions/gob.go create mode 100644 vendor/storj.io/common/peertls/extensions/revocations.go create mode 100644 vendor/storj.io/common/peertls/peertls.go create mode 100644 vendor/storj.io/common/peertls/templates.go create mode 100644 vendor/storj.io/common/peertls/tlsopts/cert.go create mode 100644 vendor/storj.io/common/peertls/tlsopts/config.go create mode 100644 vendor/storj.io/common/peertls/tlsopts/doc.go create mode 100644 vendor/storj.io/common/peertls/tlsopts/options.go create mode 100644 vendor/storj.io/common/peertls/tlsopts/tls.go create mode 100644 vendor/storj.io/common/peertls/utils.go create mode 100644 vendor/storj.io/common/pkcrypto/common.go create mode 100644 vendor/storj.io/common/pkcrypto/doc.go create mode 100644 vendor/storj.io/common/pkcrypto/encoding.go create mode 100644 
vendor/storj.io/common/pkcrypto/hashing.go create mode 100644 vendor/storj.io/common/pkcrypto/signing.go create mode 100644 vendor/storj.io/common/ranger/common.go create mode 100644 vendor/storj.io/common/ranger/doc.go create mode 100644 vendor/storj.io/common/ranger/file.go create mode 100644 vendor/storj.io/common/ranger/reader.go create mode 100644 vendor/storj.io/common/ranger/readerat.go create mode 100644 vendor/storj.io/common/readcloser/doc.go create mode 100644 vendor/storj.io/common/readcloser/fatal.go create mode 100644 vendor/storj.io/common/readcloser/lazy.go create mode 100644 vendor/storj.io/common/readcloser/limit.go create mode 100644 vendor/storj.io/common/readcloser/multi.go create mode 100644 vendor/storj.io/common/rpc/common.go create mode 100644 vendor/storj.io/common/rpc/conn.go create mode 100644 vendor/storj.io/common/rpc/dial.go create mode 100644 vendor/storj.io/common/rpc/doc.go create mode 100644 vendor/storj.io/common/rpc/known_ids.go create mode 100644 vendor/storj.io/common/rpc/lookup.go create mode 100644 vendor/storj.io/common/rpc/rpcpeer/peer.go create mode 100644 vendor/storj.io/common/rpc/rpcpool/pool.go create mode 100644 vendor/storj.io/common/rpc/rpcstatus/status.go create mode 100644 vendor/storj.io/common/rpc/rpctracing/common.go create mode 100644 vendor/storj.io/common/rpc/rpctracing/handler.go create mode 100644 vendor/storj.io/common/rpc/rpctracing/tracing.go create mode 100644 vendor/storj.io/common/signing/doc.go create mode 100644 vendor/storj.io/common/signing/encode.go create mode 100644 vendor/storj.io/common/signing/peers.go create mode 100644 vendor/storj.io/common/signing/sign.go create mode 100644 vendor/storj.io/common/signing/verify.go create mode 100644 vendor/storj.io/common/storj/bucket.go create mode 100644 vendor/storj.io/common/storj/doc.go create mode 100644 vendor/storj.io/common/storj/encryption.go create mode 100644 vendor/storj.io/common/storj/identity_version.go create mode 100644 
vendor/storj.io/common/storj/metainfo.go create mode 100644 vendor/storj.io/common/storj/node.go create mode 100644 vendor/storj.io/common/storj/nodeurl.go create mode 100644 vendor/storj.io/common/storj/object.go create mode 100644 vendor/storj.io/common/storj/object_list_item.go create mode 100644 vendor/storj.io/common/storj/path.go create mode 100644 vendor/storj.io/common/storj/pieceid.go create mode 100644 vendor/storj.io/common/storj/piecekey.go create mode 100644 vendor/storj.io/common/storj/redundancy.go create mode 100644 vendor/storj.io/common/storj/segment.go create mode 100644 vendor/storj.io/common/storj/segmentid.go create mode 100644 vendor/storj.io/common/storj/serialnumber.go create mode 100644 vendor/storj.io/common/storj/streamid.go create mode 100644 vendor/storj.io/common/sync2/cooldown.go create mode 100644 vendor/storj.io/common/sync2/copy.go create mode 100644 vendor/storj.io/common/sync2/cycle.go create mode 100644 vendor/storj.io/common/sync2/doc.go create mode 100644 vendor/storj.io/common/sync2/fence.go create mode 100644 vendor/storj.io/common/sync2/io.go create mode 100644 vendor/storj.io/common/sync2/limiter.go create mode 100644 vendor/storj.io/common/sync2/nocopy.go create mode 100644 vendor/storj.io/common/sync2/parent_child_limiter.go create mode 100644 vendor/storj.io/common/sync2/pipe.go create mode 100644 vendor/storj.io/common/sync2/semaphore.go create mode 100644 vendor/storj.io/common/sync2/sleep.go create mode 100644 vendor/storj.io/common/sync2/success_threshold.go create mode 100644 vendor/storj.io/common/sync2/tee.go create mode 100644 vendor/storj.io/common/sync2/throttle.go create mode 100644 vendor/storj.io/common/sync2/workgroup.go create mode 100644 vendor/storj.io/common/uuid/db.go create mode 100644 vendor/storj.io/common/uuid/fuzz.go create mode 100644 vendor/storj.io/common/uuid/uuid.go create mode 100644 vendor/storj.io/drpc/.gitignore create mode 100644 vendor/storj.io/drpc/.golangci.yml create mode 100644 
vendor/storj.io/drpc/Dockerfile.jenkins create mode 100644 vendor/storj.io/drpc/Jenkinsfile create mode 100644 vendor/storj.io/drpc/LICENSE create mode 100644 vendor/storj.io/drpc/Makefile create mode 100644 vendor/storj.io/drpc/README.md create mode 100644 vendor/storj.io/drpc/doc.go create mode 100644 vendor/storj.io/drpc/drpc.go create mode 100644 vendor/storj.io/drpc/drpcconn/README.md create mode 100644 vendor/storj.io/drpc/drpcconn/conn.go create mode 100644 vendor/storj.io/drpc/drpcconn/doc.go create mode 100644 vendor/storj.io/drpc/drpcctx/README.md create mode 100644 vendor/storj.io/drpc/drpcctx/doc.go create mode 100644 vendor/storj.io/drpc/drpcctx/transport.go create mode 100644 vendor/storj.io/drpc/drpcdebug/README.md create mode 100644 vendor/storj.io/drpc/drpcdebug/doc.go create mode 100644 vendor/storj.io/drpc/drpcdebug/log_disabled.go create mode 100644 vendor/storj.io/drpc/drpcdebug/log_enabled.go create mode 100644 vendor/storj.io/drpc/drpcerr/README.md create mode 100644 vendor/storj.io/drpc/drpcerr/doc.go create mode 100644 vendor/storj.io/drpc/drpcerr/err.go create mode 100644 vendor/storj.io/drpc/drpcmanager/README.md create mode 100644 vendor/storj.io/drpc/drpcmanager/doc.go create mode 100644 vendor/storj.io/drpc/drpcmanager/manager.go create mode 100644 vendor/storj.io/drpc/drpcmetadata/README.md create mode 100644 vendor/storj.io/drpc/drpcmetadata/doc.go create mode 100644 vendor/storj.io/drpc/drpcmetadata/invoke/README.md create mode 100644 vendor/storj.io/drpc/drpcmetadata/invoke/doc.go create mode 100644 vendor/storj.io/drpc/drpcmetadata/invoke/metadata.pb.go create mode 100644 vendor/storj.io/drpc/drpcmetadata/invoke/metadata.proto create mode 100644 vendor/storj.io/drpc/drpcmetadata/metadata.go create mode 100644 vendor/storj.io/drpc/drpcmux/README.md create mode 100644 vendor/storj.io/drpc/drpcmux/doc.go create mode 100644 vendor/storj.io/drpc/drpcmux/mux.go create mode 100644 vendor/storj.io/drpc/drpcsignal/README.md create mode 
100644 vendor/storj.io/drpc/drpcsignal/doc.go create mode 100644 vendor/storj.io/drpc/drpcsignal/signal.go create mode 100644 vendor/storj.io/drpc/drpcstream/README.md create mode 100644 vendor/storj.io/drpc/drpcstream/chmutex.go create mode 100644 vendor/storj.io/drpc/drpcstream/doc.go create mode 100644 vendor/storj.io/drpc/drpcstream/stream.go create mode 100644 vendor/storj.io/drpc/drpcwire/README.md create mode 100644 vendor/storj.io/drpc/drpcwire/doc.go create mode 100644 vendor/storj.io/drpc/drpcwire/error.go create mode 100644 vendor/storj.io/drpc/drpcwire/packet.go create mode 100644 vendor/storj.io/drpc/drpcwire/packet_string.go create mode 100644 vendor/storj.io/drpc/drpcwire/split.go create mode 100644 vendor/storj.io/drpc/drpcwire/transport.go create mode 100644 vendor/storj.io/drpc/drpcwire/varint.go create mode 100644 vendor/storj.io/drpc/go.mod create mode 100644 vendor/storj.io/drpc/go.sum create mode 100644 vendor/storj.io/drpc/staticcheck.conf create mode 100644 vendor/storj.io/uplink/.clabot create mode 100644 vendor/storj.io/uplink/.gitignore create mode 100644 vendor/storj.io/uplink/CODE_OF_CONDUCT.md create mode 100644 vendor/storj.io/uplink/Jenkinsfile create mode 100644 vendor/storj.io/uplink/LICENSE create mode 100644 vendor/storj.io/uplink/MAINTAINERS.md create mode 100644 vendor/storj.io/uplink/Makefile create mode 100644 vendor/storj.io/uplink/README.md create mode 100644 vendor/storj.io/uplink/access.go create mode 100644 vendor/storj.io/uplink/bucket.go create mode 100644 vendor/storj.io/uplink/buckets.go create mode 100644 vendor/storj.io/uplink/common.go create mode 100644 vendor/storj.io/uplink/config.go create mode 100644 vendor/storj.io/uplink/doc.go create mode 100644 vendor/storj.io/uplink/download.go create mode 100644 vendor/storj.io/uplink/encryption.go create mode 100644 vendor/storj.io/uplink/go.mod create mode 100644 vendor/storj.io/uplink/go.sum create mode 100644 vendor/storj.io/uplink/internal/expose/exposed.go create 
mode 100644 vendor/storj.io/uplink/internal/telemetryclient/client.go create mode 100644 vendor/storj.io/uplink/object.go create mode 100644 vendor/storj.io/uplink/objects.go create mode 100644 vendor/storj.io/uplink/private/ecclient/client.go create mode 100644 vendor/storj.io/uplink/private/ecclient/common.go create mode 100644 vendor/storj.io/uplink/private/eestream/common.go create mode 100644 vendor/storj.io/uplink/private/eestream/decode.go create mode 100644 vendor/storj.io/uplink/private/eestream/encode.go create mode 100644 vendor/storj.io/uplink/private/eestream/piecebuf.go create mode 100644 vendor/storj.io/uplink/private/eestream/rs.go create mode 100644 vendor/storj.io/uplink/private/eestream/stripe.go create mode 100644 vendor/storj.io/uplink/private/eestream/unsafe_rs.go create mode 100644 vendor/storj.io/uplink/private/metainfo/batch.go create mode 100644 vendor/storj.io/uplink/private/metainfo/client.go create mode 100644 vendor/storj.io/uplink/private/metainfo/client_old.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/paths.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/stream.go create mode 100644 vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go create mode 100644 vendor/storj.io/uplink/private/piecestore/buffering.go create mode 100644 vendor/storj.io/uplink/private/piecestore/client.go create mode 100644 vendor/storj.io/uplink/private/piecestore/download.go create mode 100644 vendor/storj.io/uplink/private/piecestore/upload.go create mode 100644 
vendor/storj.io/uplink/private/piecestore/verification.go create mode 100644 vendor/storj.io/uplink/private/storage/segments/common.go create mode 100644 vendor/storj.io/uplink/private/storage/segments/peek.go create mode 100644 vendor/storj.io/uplink/private/storage/segments/size.go create mode 100644 vendor/storj.io/uplink/private/storage/segments/store.go create mode 100644 vendor/storj.io/uplink/private/storage/streams/eof.go create mode 100644 vendor/storj.io/uplink/private/storage/streams/path.go create mode 100644 vendor/storj.io/uplink/private/storage/streams/shim.go create mode 100644 vendor/storj.io/uplink/private/storage/streams/size.go create mode 100644 vendor/storj.io/uplink/private/storage/streams/store.go create mode 100644 vendor/storj.io/uplink/private/stream/common.go create mode 100644 vendor/storj.io/uplink/private/stream/download.go create mode 100644 vendor/storj.io/uplink/private/stream/upload.go create mode 100644 vendor/storj.io/uplink/project.go create mode 100644 vendor/storj.io/uplink/upload.go diff --git a/go.mod b/go.mod index 3ced0dedb..b8ae83584 100644 --- a/go.mod +++ b/go.mod @@ -67,6 +67,7 @@ require ( google.golang.org/api v0.21.1-0.20200411000818-c8cf5cff125e google.golang.org/genproto v0.0.0-20200225123651-fc8f55426688 // indirect gopkg.in/yaml.v2 v2.2.8 + storj.io/uplink v1.0.5 ) go 1.13 diff --git a/go.sum b/go.sum index e7d9cf4c7..1b69a51c2 100644 --- a/go.sum +++ b/go.sum @@ -52,6 +52,7 @@ github.com/aalpar/deheap v0.0.0-20191229192855-f837f7a9ba26 h1:vn0bf9tSbwrl9aBeG github.com/aalpar/deheap v0.0.0-20191229192855-f837f7a9ba26/go.mod h1:EJFoWbcEEVK22GYKONJjtMNamGYe6p+3x1Pr6zV5gFs= github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -78,6 +79,18 @@ github.com/billziss-gh/cgofuse v1.2.0 h1:FMdQSygSBpD4yEPENJcmvfCdmNWMVkPLlD7wWdl github.com/billziss-gh/cgofuse v1.2.0/go.mod h1:LJjoaUojlVjgo5GQoEJTcJNqZJeRU0nCR84CyxKt2YM= github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA= +github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk= +github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -97,6 +110,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -125,6 +139,7 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -189,12 +204,14 @@ github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbc github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3 h1:QyB6CQGLB65Al72mAIbqrkGRk56JdGMHgBziM3F0FCw= github.com/jlaffaye/ftp v0.0.0-20191218041957-e1b8fdd0dcc3/go.mod h1:PwUeyujmhaGohgOf0kJKxPfk3HcRv8QD/wAUN44go4k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -209,6 +226,7 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uia github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/konsorten/go-windows-terminal-sequences v1.0.1 
h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -242,6 +260,8 @@ github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuuj github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5 h1:l16XLUUJ34wIz+RIvLhSwGvLvKyy+W598b135bJN6mg= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -263,8 +283,10 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs= github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.9.0 h1:SZjF721BByVj8QH636/8S2DnX4n0Re3SteMmw3N+tzc= github.com/onsi/ginkgo v1.9.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.6.0 
h1:8XTW0fcJZEq9q+Upcyws4JSGua2MFysCL5xkaSgHc+M= github.com/onsi/gomega v1.6.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= @@ -332,11 +354,17 @@ github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:X github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 h1:xHQewZjohU9/wUsyC99navCjQDNHtTgUOM/J1jAbzfw= +github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1/go.mod h1:7NL9UAYQnRM5iKHUCld3tf02fKb5Dft+41+VckASUy0= +github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.6 h1:BKPrEaLokVAxlwHkD7jawViBa/IU9/bgXbZLWgjbdSM= +github.com/spacemonkeygo/monkit/v3 v3.0.6/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= +github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= 
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -365,6 +393,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1 github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b h1:dLkqBELopfQNhe8S9ucnSf+HhiUCgK/hPIjVG0f9GlY= +github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b/go.mod h1:5oyMAv4hrBEKqBwORFsiqIrCNCmL2qcZLQTdJLYeYIc= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= @@ -374,6 +404,14 @@ github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 h1:Ud2neINE1YFEwrcJ4 github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8= github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4= +github.com/zeebo/admission/v2 v2.0.0/go.mod h1:gSeHGelDHW7Vq6UyJo2boeSt/6Dsnqpisv0i4YZSOyM= +github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= 
+github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo= +github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -384,11 +422,15 @@ go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= goftp.io/server v0.3.2 h1:bcsI4ijbvFZkA4rrUtIE/t1jNhT+0uSkiTQ4ASjZAXQ= goftp.io/server v0.3.2/go.mod h1:wfeAZeQgacupLVl+Ex3ozYFaAGNfCKYZiZNxLzgw6z4= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -398,6 +440,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod 
h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -446,6 +490,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -488,6 +533,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -611,3 +657,9 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +storj.io/common v0.0.0-20200429074521-4ba140e4b747 h1:Ne1x0M80uNyN6tHIs15CGJqHbreKbvH5BOq4jdWsqMc= +storj.io/common v0.0.0-20200429074521-4ba140e4b747/go.mod h1:lfsaMdtHwrUOtSYkw73meCyZMUYiaFBKVqx6zgeSz2o= +storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw= +storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= +storj.io/uplink v1.0.5 h1:RH6LUZQPOJZ01JpM0YmghDu5xyex5gyn32NSCzsPSr4= +storj.io/uplink v1.0.5/go.mod h1:GkChEUgHFuUR2WNpqFw3NeqglXz6/zp6n5Rxt0IVfHw= diff --git a/vendor/github.com/btcsuite/btcutil/LICENSE b/vendor/github.com/btcsuite/btcutil/LICENSE new file mode 100644 index 000000000..3e7b16791 --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/LICENSE @@ -0,0 +1,16 @@ +ISC License + +Copyright (c) 2013-2017 The btcsuite developers +Copyright (c) 2016-2017 The Lightning Network Developers + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/btcsuite/btcutil/base58/README.md b/vendor/github.com/btcsuite/btcutil/base58/README.md new file mode 100644 index 000000000..98dfb1db8 --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/README.md @@ -0,0 +1,34 @@ +base58 +========== + +[![Build Status](http://img.shields.io/travis/btcsuite/btcutil.svg)](https://travis-ci.org/btcsuite/btcutil) +[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/btcsuite/btcutil/base58) + +Package base58 provides an API for encoding and decoding to and from the +modified base58 encoding. It also provides an API to do Base58Check encoding, +as described [here](https://en.bitcoin.it/wiki/Base58Check_encoding). + +A comprehensive suite of tests is provided to ensure proper functionality. + +## Installation and Updating + +```bash +$ go get -u github.com/btcsuite/btcutil/base58 +``` + +## Examples + +* [Decode Example](http://godoc.org/github.com/btcsuite/btcutil/base58#example-Decode) + Demonstrates how to decode modified base58 encoded data. +* [Encode Example](http://godoc.org/github.com/btcsuite/btcutil/base58#example-Encode) + Demonstrates how to encode data using the modified base58 encoding scheme. +* [CheckDecode Example](http://godoc.org/github.com/btcsuite/btcutil/base58#example-CheckDecode) + Demonstrates how to decode Base58Check encoded data. 
+* [CheckEncode Example](http://godoc.org/github.com/btcsuite/btcutil/base58#example-CheckEncode) + Demonstrates how to encode data using the Base58Check encoding scheme. + +## License + +Package base58 is licensed under the [copyfree](http://copyfree.org) ISC +License. diff --git a/vendor/github.com/btcsuite/btcutil/base58/alphabet.go b/vendor/github.com/btcsuite/btcutil/base58/alphabet.go new file mode 100644 index 000000000..6bb39fef1 --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/alphabet.go @@ -0,0 +1,49 @@ +// Copyright (c) 2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +// AUTOGENERATED by genalphabet.go; do not edit. + +package base58 + +const ( + // alphabet is the modified base58 alphabet used by Bitcoin. + alphabet = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" + + alphabetIdx0 = '1' +) + +var b58 = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 0, 1, 2, 3, 4, 5, 6, + 7, 8, 255, 255, 255, 255, 255, 255, + 255, 9, 10, 11, 12, 13, 14, 15, + 16, 255, 17, 18, 19, 20, 21, 255, + 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 255, 255, 255, 255, 255, + 255, 33, 34, 35, 36, 37, 38, 39, + 40, 41, 42, 43, 255, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 
255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, +} diff --git a/vendor/github.com/btcsuite/btcutil/base58/base58.go b/vendor/github.com/btcsuite/btcutil/base58/base58.go new file mode 100644 index 000000000..19a72de2c --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/base58.go @@ -0,0 +1,75 @@ +// Copyright (c) 2013-2015 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package base58 + +import ( + "math/big" +) + +//go:generate go run genalphabet.go + +var bigRadix = big.NewInt(58) +var bigZero = big.NewInt(0) + +// Decode decodes a modified base58 string to a byte slice. +func Decode(b string) []byte { + answer := big.NewInt(0) + j := big.NewInt(1) + + scratch := new(big.Int) + for i := len(b) - 1; i >= 0; i-- { + tmp := b58[b[i]] + if tmp == 255 { + return []byte("") + } + scratch.SetInt64(int64(tmp)) + scratch.Mul(j, scratch) + answer.Add(answer, scratch) + j.Mul(j, bigRadix) + } + + tmpval := answer.Bytes() + + var numZeros int + for numZeros = 0; numZeros < len(b); numZeros++ { + if b[numZeros] != alphabetIdx0 { + break + } + } + flen := numZeros + len(tmpval) + val := make([]byte, flen) + copy(val[numZeros:], tmpval) + + return val +} + +// Encode encodes a byte slice to a modified base58 string. 
+func Encode(b []byte) string { + x := new(big.Int) + x.SetBytes(b) + + answer := make([]byte, 0, len(b)*136/100) + for x.Cmp(bigZero) > 0 { + mod := new(big.Int) + x.DivMod(x, bigRadix, mod) + answer = append(answer, alphabet[mod.Int64()]) + } + + // leading zero bytes + for _, i := range b { + if i != 0 { + break + } + answer = append(answer, alphabetIdx0) + } + + // reverse + alen := len(answer) + for i := 0; i < alen/2; i++ { + answer[i], answer[alen-1-i] = answer[alen-1-i], answer[i] + } + + return string(answer) +} diff --git a/vendor/github.com/btcsuite/btcutil/base58/base58check.go b/vendor/github.com/btcsuite/btcutil/base58/base58check.go new file mode 100644 index 000000000..7cdafeeec --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/base58check.go @@ -0,0 +1,52 @@ +// Copyright (c) 2013-2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +package base58 + +import ( + "crypto/sha256" + "errors" +) + +// ErrChecksum indicates that the checksum of a check-encoded string does not verify against +// the checksum. +var ErrChecksum = errors.New("checksum error") + +// ErrInvalidFormat indicates that the check-encoded string has an invalid format. +var ErrInvalidFormat = errors.New("invalid format: version and/or checksum bytes missing") + +// checksum: first four bytes of sha256^2 +func checksum(input []byte) (cksum [4]byte) { + h := sha256.Sum256(input) + h2 := sha256.Sum256(h[:]) + copy(cksum[:], h2[:4]) + return +} + +// CheckEncode prepends a version byte and appends a four byte checksum. +func CheckEncode(input []byte, version byte) string { + b := make([]byte, 0, 1+len(input)+4) + b = append(b, version) + b = append(b, input[:]...) + cksum := checksum(b) + b = append(b, cksum[:]...) + return Encode(b) +} + +// CheckDecode decodes a string that was encoded with CheckEncode and verifies the checksum. 
+func CheckDecode(input string) (result []byte, version byte, err error) { + decoded := Decode(input) + if len(decoded) < 5 { + return nil, 0, ErrInvalidFormat + } + version = decoded[0] + var cksum [4]byte + copy(cksum[:], decoded[len(decoded)-4:]) + if checksum(decoded[:len(decoded)-4]) != cksum { + return nil, 0, ErrChecksum + } + payload := decoded[1 : len(decoded)-4] + result = append(result, payload...) + return +} diff --git a/vendor/github.com/btcsuite/btcutil/base58/cov_report.sh b/vendor/github.com/btcsuite/btcutil/base58/cov_report.sh new file mode 100644 index 000000000..307f05b76 --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/cov_report.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +type gocov >/dev/null 2>&1 +if [ $? -ne 0 ]; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi +gocov test | gocov report diff --git a/vendor/github.com/btcsuite/btcutil/base58/doc.go b/vendor/github.com/btcsuite/btcutil/base58/doc.go new file mode 100644 index 000000000..9a2c0e6e3 --- /dev/null +++ b/vendor/github.com/btcsuite/btcutil/base58/doc.go @@ -0,0 +1,29 @@ +// Copyright (c) 2014 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* +Package base58 provides an API for working with modified base58 and Base58Check +encodings. + +Modified Base58 Encoding + +Standard base58 encoding is similar to standard base64 encoding except, as the +name implies, it uses a 58 character alphabet which results in an alphanumeric +string and allows some characters which are problematic for humans to be +excluded. 
Due to this, there can be various base58 alphabets. + +The modified base58 alphabet used by Bitcoin, and hence this package, omits the +0, O, I, and l characters that look the same in many fonts and are therefore +hard to humans to distinguish. + +Base58Check Encoding Scheme + +The Base58Check encoding scheme is primarily used for Bitcoin addresses at the +time of this writing, however it can be used to generically encode arbitrary +byte arrays into human-readable strings along with a version byte that can be +used to differentiate the same payload. For Bitcoin addresses, the extra +version is used to differentiate the network of otherwise identical public keys +which helps prevent using an address intended for one network on another. +*/ +package base58 diff --git a/vendor/github.com/calebcase/tmpfile/.golangci.yml b/vendor/github.com/calebcase/tmpfile/.golangci.yml new file mode 100644 index 000000000..79bb0fabc --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/.golangci.yml @@ -0,0 +1,45 @@ +run: + deadline: 10m + issues-exit-code: 1 + tests: true + + skip-files: + - "^doc.*\\.go$" + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - errcheck + - gocritic + - gofmt + - golint + - govet + - ineffassign + - misspell + - nakedret + - scopelint + - staticcheck + - structcheck + - unconvert + - varcheck + fast: false + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + +linters-settings: + gocyclo: + min-complexity: 10 + maligned: + suggest-new: true + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + new: false + exclude-use-default: false diff --git a/vendor/github.com/calebcase/tmpfile/LICENSE b/vendor/github.com/calebcase/tmpfile/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/calebcase/tmpfile/LICENSE.golang b/vendor/github.com/calebcase/tmpfile/LICENSE.golang new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/LICENSE.golang @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/calebcase/tmpfile/README.md b/vendor/github.com/calebcase/tmpfile/README.md new file mode 100644 index 000000000..b74ed8710 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/README.md @@ -0,0 +1,30 @@ +[![Documentation][godoc.badge]][godoc] +[![Test Status][workflow.tests.badge]][workflow.tests] + +# Cross Platform Temporary Files + +This library attempts to bridge the gap between the what is provided in +[ioutil.TempFile][ioutil.tempfile] and the best practice of ensuring temporary +files are ***always*** deleted when the application exits. + +The normal way to do this on a POSIX system is to use the behavior of +[unlink][posix.unlink] to immediately remove the directory entry for the +temporary file. The OS then ensures that when all open file handles on the file +are close that the file resources are removed. 
Unfortunately, despite Go having +[os.Remove][os.remove] this does not work on Windows because on Windows it is +necessary to open the files with special flags +([FILE_SHARE_DELETE][windows.flags.share], +[FILE_FLAG_DELETE_ON_CLOSE][windows.flags.on_close]) to enable removing a file +that is open (and ioutil does not do this). + +--- + +[godoc.badge]: https://godoc.org/github.com/calebcase/tmpfile?status.svg +[godoc]: https://godoc.org/github.com/calebcase/tmpfile +[ioutil.tempfile]: https://golang.org/pkg/io/ioutil/#TempFile +[os.remove]: https://golang.org/pkg/os/#Remove +[posix.unlink]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/unlink.html +[windows.flags.on_close]: https://github.com/golang/sys/blob/master/windows/types_windows.go#L108 +[windows.flags.share]: https://github.com/golang/sys/blob/master/windows/types_windows.go#L71 +[workflow.tests.badge]: https://github.com/calebcase/tmpfile/workflows/tests/badge.svg +[workflow.tests]: https://github.com/calebcase/tmpfile/actions?query=workflow%3Atests diff --git a/vendor/github.com/calebcase/tmpfile/doc.go b/vendor/github.com/calebcase/tmpfile/doc.go new file mode 100644 index 000000000..fd682b74e --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/doc.go @@ -0,0 +1,9 @@ +// Package tmpfile provides a cross platform facility for creating temporary +// files that are automatically cleaned up (even in the event of an unexpected +// process exit). +// +// tmpfile provides support for at least Linux, OSX, and Windows. Generally any +// POSIX system that adheres to the semantics of unlink +// (https://pubs.opengroup.org/onlinepubs/9699919799/functions/unlink.html) +// should work. Special handling is provided for other platforms. 
+package tmpfile diff --git a/vendor/github.com/calebcase/tmpfile/go.mod b/vendor/github.com/calebcase/tmpfile/go.mod new file mode 100644 index 000000000..7decac532 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/go.mod @@ -0,0 +1,5 @@ +module github.com/calebcase/tmpfile + +go 1.13 + +require golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 diff --git a/vendor/github.com/calebcase/tmpfile/go.sum b/vendor/github.com/calebcase/tmpfile/go.sum new file mode 100644 index 000000000..b518499e2 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 h1:gSbV7h1NRL2G1xTg/owz62CST1oJBmxy4QpMMregXVQ= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/calebcase/tmpfile/tmpfile.go b/vendor/github.com/calebcase/tmpfile/tmpfile.go new file mode 100644 index 000000000..0ace62227 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/tmpfile.go @@ -0,0 +1,25 @@ +// +build !windows + +package tmpfile + +import ( + "io/ioutil" + "os" +) + +// New creates a new temporary file in the directory dir using ioutil.TempFile +// and then unlinks the file with os.Remove to ensure the file is deleted when +// the calling process exists. +func New(dir, pattern string) (f *os.File, err error) { + f, err = ioutil.TempFile(dir, pattern) + if err != nil { + return + } + + err = os.Remove(f.Name()) + if err != nil { + return + } + + return +} diff --git a/vendor/github.com/calebcase/tmpfile/tmpfile_windows.go b/vendor/github.com/calebcase/tmpfile/tmpfile_windows.go new file mode 100644 index 000000000..3820b5698 --- /dev/null +++ b/vendor/github.com/calebcase/tmpfile/tmpfile_windows.go @@ -0,0 +1,94 @@ +// Copyright 2010 The Go Authors. All rights reserved. Use of this source code +// is governed by a BSD-style license that can be found in the LICENSE.golang +// file. 
+// +// This is a copied and modified version of the code provided in: +// https://golang.org/src/io/ioutil/tempfile.go + +package tmpfile + +import ( + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/sys/windows" +) + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextRandom() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// New creates a new temporary file in the directory dir using the same method +// as ioutil.TempFile and then unlinks the file with os.Remove to ensure the +// file is deleted when the calling process exists. 
+func New(dir, pattern string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextRandom()+suffix) + + // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea + handle, err := windows.CreateFile( + windows.StringToUTF16Ptr(name), // File Name + windows.GENERIC_READ|windows.GENERIC_WRITE|windows.DELETE, // Desired Access + windows.FILE_SHARE_DELETE, // Share Mode + nil, // Security Attributes + windows.CREATE_NEW, // Create Disposition + windows.FILE_ATTRIBUTE_TEMPORARY|windows.FILE_FLAG_DELETE_ON_CLOSE, // Flags & Attributes + 0, // Template File + ) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + + continue + } + + f = os.NewFile(uintptr(handle), name) + + break + } + + err = os.Remove(f.Name()) + if err != nil { + return + } + + return f, nil +} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 000000000..3d97fc7a2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. 
+ +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 000000000..1b4f6c208 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000..f57de90da --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000..00d65f327 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000..a26b046d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. 
+type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. 
+ if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000..24552483c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000..63b0f08be --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if 
b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. 
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. 
+// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. 
+func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). 
+ var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000..fe1bd7d90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. 
This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? 
+ dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000..93464c91c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). 
+func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..e748e1730 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000..3abfed2cf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. 
+func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. 
+// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000..0f5fb173e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000..d4db5a1c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. 
+ - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. 
+ n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. 
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. 
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000..686bd2a09 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,604 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. 
+type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. 
+type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. 
When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. 
+type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? 
+ extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. 
+ // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. 
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. 
+func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000..53ebd8cca --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,368 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + 
return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != 
nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + 
ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000..d17f80209 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. 
+ - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return 
m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. 
+ if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. 
+func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. 
+// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) 
+ depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice 
of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", 
prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000..b3aa39190 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000..f48a75676 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
+var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) 
// join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..b6cad9083 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. 
+/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p 
pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..7ffd3c29d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. + +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..d55a335d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. 
+type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. 
+func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. 
+func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000..aca8eed02 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000..c9e5fa020 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,599 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. 
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. 
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. 
+ p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. 
+ +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. 
+ protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000..40ea3dd93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000..5a5fd93f7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000..9b1538d05 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. 
+type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. 
+type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. 
+ return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. 
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." 
+ err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return 
makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return 
makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return 
sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if 
pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } 
+ if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} 
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func 
sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func 
sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func 
sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + 
tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + 
byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v 
== 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 
b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = 
appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := 
*ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for 
_, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b 
[]byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + 
b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = 
appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. 
+ // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = 
keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. 
+ if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. 
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. 
+// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) 
+ return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000..997f57c1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != 
nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000..f520106e0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. 
+ if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000..bb2622f28 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. 
+// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. 
+ if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. 
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. 
+ continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. 
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), 
name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return 
makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + 
return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case 
"varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n 
:= decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, 
error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + 
return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | 
uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, 
errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, 
io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func 
unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + 
return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + 
if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) 
+ *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. 
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000..00d6c7ad9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w 
!= WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + 
return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if 
err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 000000000..0407ba85d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return 
err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. 
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. 
+ switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. 
+// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + 
case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. 
+ if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. 
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. 
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000..1d6c6aa0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000..1ce0be2fa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // 
remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + 
p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. 
+func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. 
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. 
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. 
+ p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + 
ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
+ return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. 
+// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := ×tamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 000000000..38439fa99 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 000000000..b175d1b64 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub 
*unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func 
makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 000000000..c1cf7bf85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m 
*stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/minio/sha256-simd/.gitignore b/vendor/github.com/minio/sha256-simd/.gitignore new file mode 100644 index 000000000..c56069fe2 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/.gitignore @@ -0,0 +1 @@ +*.test \ No newline at end of file diff --git a/vendor/github.com/minio/sha256-simd/.travis.yml b/vendor/github.com/minio/sha256-simd/.travis.yml new file mode 100644 index 000000000..744e64cfd --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/.travis.yml @@ -0,0 +1,24 @@ +sudo: required +dist: trusty +language: go + +os: +- linux + +go: +- tip +- 1.11.x + +env: +- ARCH=x86_64 +- ARCH=i686 + +matrix: + fast_finish: true + allow_failures: + - go: tip + +script: +- diff -au <(gofmt -d .) <(printf "") +- go test -race -v ./... +- go tool vet -asmdecl . 
diff --git a/vendor/github.com/minio/sha256-simd/LICENSE b/vendor/github.com/minio/sha256-simd/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/minio/sha256-simd/README.md b/vendor/github.com/minio/sha256-simd/README.md new file mode 100644 index 000000000..5282d83ad --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/README.md @@ -0,0 +1,133 @@ +# sha256-simd + +Accelerate SHA256 computations in pure Go using AVX512, SHA Extensions and AVX2 for Intel and ARM64 for ARM. On AVX512 it provides an up to 8x improvement (over 3 GB/s per core) in comparison to AVX2. SHA Extensions give a performance boost of close to 4x over AVX2. + +## Introduction + +This package is designed as a replacement for `crypto/sha256`. For Intel CPUs it has two flavors for AVX512 and AVX2 (AVX/SSE are also supported). 
For ARM CPUs with the Cryptography Extensions, advantage is taken of the SHA2 instructions resulting in a massive performance improvement. + +This package uses Golang assembly. The AVX512 version is based on the Intel's "multi-buffer crypto library for IPSec" whereas the other Intel implementations are described in "Fast SHA-256 Implementations on Intel Architecture Processors" by J. Guilford et al. + +## New: Support for Intel SHA Extensions + +Support for the Intel SHA Extensions has been added by Kristofer Peterson (@svenski123), originally developed for spacemeshos [here](https://github.com/spacemeshos/POET/issues/23). On CPUs that support it (known thus far Intel Celeron J3455 and AMD Ryzen) it gives a significant boost in performance (with thanks to @AudriusButkevicius for reporting the results; full results [here](https://github.com/minio/sha256-simd/pull/37#issuecomment-451607827)). + +``` +$ benchcmp avx2.txt sha-ext.txt +benchmark AVX2 MB/s SHA Ext MB/s speedup +BenchmarkHash5M 514.40 1975.17 3.84x +``` + +Thanks to Kristofer Peterson, we also added additional performance changes such as optimized padding, endian conversions which sped up all implementations i.e. Intel SHA alone while doubled performance for small sizes, the other changes increased everything roughly 50%. + +## Support for AVX512 + +We have added support for AVX512 which results in an up to 8x performance improvement over AVX2 (3.0 GHz Xeon Platinum 8124M CPU): + +``` +$ benchcmp avx2.txt avx512.txt +benchmark AVX2 MB/s AVX512 MB/s speedup +BenchmarkHash5M 448.62 3498.20 7.80x +``` + +The original code was developed by Intel as part of the [multi-buffer crypto library](https://github.com/intel/intel-ipsec-mb) for IPSec or more specifically this [AVX512](https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm) implementation. 
The key idea behind it is to process a total of 16 checksums in parallel by “transposing” 16 (independent) messages of 64 bytes between a total of 16 ZMM registers (each 64 bytes wide). + +Transposing the input messages means that in order to take full advantage of the speedup you need to have a (server) workload where multiple threads are doing SHA256 calculations in parallel. Unfortunately for this algorithm it is not possible for two message blocks processed in parallel to be dependent on one another — because then the (interim) result of the first part of the message has to be an input into the processing of the second part of the message. + +Whereas the original Intel C implementation requires some sort of explicit scheduling of messages to be processed in parallel, for Golang it makes sense to take advantage of channels in order to group messages together and use channels as well for sending back the results (thereby effectively decoupling the calculations). We have implemented a fairly simple scheduling mechanism that seems to work well in practice. + +Due to this different way of scheduling, we decided to use an explicit method to instantiate the AVX512 version. Essentially one or more AVX512 processing servers ([`Avx512Server`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L294)) have to be created whereby each server can hash over 3 GB/s on a single core. 
An `hash.Hash` object ([`Avx512Digest`](https://github.com/minio/sha256-simd/blob/master/sha256blockAvx512_amd64.go#L45)) is then instantiated using one of these servers and used in the regular fashion: + +```go +import "github.com/minio/sha256-simd" + +func main() { + server := sha256.NewAvx512Server() + h512 := sha256.NewAvx512(server) + h512.Write(fileBlock) + digest := h512.Sum([]byte{}) +} +``` + +Note that, because of the scheduling overhead, for small messages (< 1 MB) you will be better off using the regular SHA256 hashing (but those are typically not performance critical anyway). Some other tips to get the best performance: +* Have many go routines doing SHA256 calculations in parallel. +* Try to Write() messages in multiples of 64 bytes. +* Try to keep the overall length of messages to a roughly similar size ie. 5 MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as much as possible). + +More detailed information can be found in this [blog](https://blog.minio.io/accelerate-sha256-up-to-8x-over-3-gb-s-per-core-with-avx512-a0b1d64f78f) post including scaling across cores. + +## Drop-In Replacement + +The following code snippet shows how you can use `github.com/minio/sha256-simd`. This will automatically select the fastest method for the architecture on which it will be executed. + +```go +import "github.com/minio/sha256-simd" + +func main() { + ... + shaWriter := sha256.New() + io.Copy(shaWriter, file) + ... +} +``` + +## Performance + +Below is the speed in MB/s for a single core (ranked fast to slow) for blocks larger than 1 MB. 
+ +| Processor | SIMD | Speed (MB/s) | +| --------------------------------- | ------- | ------------:| +| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 | +| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 | +| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 | +| 3.0 GHz Intel Xeon Platinum 8124M | AVX2 | 449 | +| 3.1 GHz Intel Core i7 | AVX | 362 | +| 3.1 GHz Intel Core i7 | SSE | 299 | + +## asm2plan9s + +In order to be able to work more easily with AVX512/AVX2 instructions, a separate tool was developed to convert SIMD instructions into the corresponding BYTE sequence as accepted by Go assembly. See [asm2plan9s](https://github.com/minio/asm2plan9s) for more information. + +## Why and benefits + +One of the most performance sensitive parts of the [Minio](https://github.com/minio/minio) object storage server is related to SHA256 hash sums calculations. For instance during multi part uploads each part that is uploaded needs to be verified for data integrity by the server. + +Other applications that can benefit from enhanced SHA256 performance are deduplication in storage systems, intrusion detection, version control systems, integrity checking, etc. + +## ARM SHA Extensions + +The 64-bit ARMv8 core has introduced new instructions for SHA1 and SHA2 acceleration as part of the [Cryptography Extensions](http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0501f/CHDFJBCJ.html). Below you can see a small excerpt highlighting one of the rounds as is done for the SHA256 calculation process (for full code see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)). 
+ + ``` + sha256h q2, q3, v9.4s + sha256h2 q3, q4, v9.4s + sha256su0 v5.4s, v6.4s + rev32 v8.16b, v8.16b + add v9.4s, v7.4s, v18.4s + mov v4.16b, v2.16b + sha256h q2, q3, v10.4s + sha256h2 q3, q4, v10.4s + sha256su0 v6.4s, v7.4s + sha256su1 v5.4s, v7.4s, v8.4s + ``` + +### Detailed benchmarks + +Benchmarks generated on a 1.2 Ghz Quad-Core ARM Cortex A53 equipped [Pine64](https://www.pine64.com/). + +``` +minio@minio-arm:$ benchcmp golang.txt arm64.txt +benchmark golang arm64 speedup +BenchmarkHash8Bytes-4 0.68 MB/s 5.70 MB/s 8.38x +BenchmarkHash1K-4 5.65 MB/s 326.30 MB/s 57.75x +BenchmarkHash8K-4 6.00 MB/s 570.63 MB/s 95.11x +BenchmarkHash1M-4 6.05 MB/s 638.23 MB/s 105.49x +``` + +## License + +Released under the Apache License v2.0. You can find the complete text in the file LICENSE. + +## Contributing + +Contributions are welcome, please send PRs for any enhancements. diff --git a/vendor/github.com/minio/sha256-simd/appveyor.yml b/vendor/github.com/minio/sha256-simd/appveyor.yml new file mode 100644 index 000000000..a66bfa9f2 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/appveyor.yml @@ -0,0 +1,32 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +# Platform. +platform: x64 + +clone_folder: c:\gopath\src\github.com\minio\sha256-simd + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go test . + - go test -race . 
+ +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/vendor/github.com/minio/sha256-simd/cpuid.go b/vendor/github.com/minio/sha256-simd/cpuid.go new file mode 100644 index 000000000..878ad4638 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid.go @@ -0,0 +1,119 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +// True when SIMD instructions are available. 
+var avx512 bool +var avx2 bool +var avx bool +var sse bool +var sse2 bool +var sse3 bool +var ssse3 bool +var sse41 bool +var sse42 bool +var popcnt bool +var sha bool +var armSha = haveArmSha() + +func init() { + var _xsave bool + var _osxsave bool + var _avx bool + var _avx2 bool + var _avx512f bool + var _avx512dq bool + // var _avx512pf bool + // var _avx512er bool + // var _avx512cd bool + var _avx512bw bool + var _avx512vl bool + var _sseState bool + var _avxState bool + var _opmaskState bool + var _zmmHI256State bool + var _hi16ZmmState bool + + mfi, _, _, _ := cpuid(0) + + if mfi >= 1 { + _, _, c, d := cpuid(1) + + sse = (d & (1 << 25)) != 0 + sse2 = (d & (1 << 26)) != 0 + sse3 = (c & (1 << 0)) != 0 + ssse3 = (c & (1 << 9)) != 0 + sse41 = (c & (1 << 19)) != 0 + sse42 = (c & (1 << 20)) != 0 + popcnt = (c & (1 << 23)) != 0 + _xsave = (c & (1 << 26)) != 0 + _osxsave = (c & (1 << 27)) != 0 + _avx = (c & (1 << 28)) != 0 + } + + if mfi >= 7 { + _, b, _, _ := cpuid(7) + + _avx2 = (b & (1 << 5)) != 0 + _avx512f = (b & (1 << 16)) != 0 + _avx512dq = (b & (1 << 17)) != 0 + // _avx512pf = (b & (1 << 26)) != 0 + // _avx512er = (b & (1 << 27)) != 0 + // _avx512cd = (b & (1 << 28)) != 0 + _avx512bw = (b & (1 << 30)) != 0 + _avx512vl = (b & (1 << 31)) != 0 + sha = (b & (1 << 29)) != 0 + } + + // Stop here if XSAVE unsupported or not enabled + if !_xsave || !_osxsave { + return + } + + if _xsave && _osxsave { + a, _ := xgetbv(0) + + _sseState = (a & (1 << 1)) != 0 + _avxState = (a & (1 << 2)) != 0 + _opmaskState = (a & (1 << 5)) != 0 + _zmmHI256State = (a & (1 << 6)) != 0 + _hi16ZmmState = (a & (1 << 7)) != 0 + } else { + _sseState = true + } + + // Very unlikely that OS would enable XSAVE and then disable SSE + if !_sseState { + sse = false + sse2 = false + sse3 = false + ssse3 = false + sse41 = false + sse42 = false + } + + if _avxState { + avx = _avx + avx2 = _avx2 + } + + if _opmaskState && _zmmHI256State && _hi16ZmmState { + avx512 = (_avx512f && + _avx512dq && + 
_avx512bw && + _avx512vl) + } +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.go b/vendor/github.com/minio/sha256-simd/cpuid_386.go new file mode 100644 index 000000000..c9890be47 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_386.go @@ -0,0 +1,24 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func xgetbv(index uint32) (eax, edx uint32) + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_386.s b/vendor/github.com/minio/sha256-simd/cpuid_386.s new file mode 100644 index 000000000..1511cd6f6 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_386.s @@ -0,0 +1,53 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial 
portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// +build 386,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.go b/vendor/github.com/minio/sha256-simd/cpuid_amd64.go new file mode 100644 index 000000000..c9890be47 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_amd64.go @@ -0,0 +1,24 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func xgetbv(index uint32) (eax, edx uint32) + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_amd64.s b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s new file mode 100644 index 000000000..b0f414748 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_amd64.s @@ -0,0 +1,53 @@ +// The MIT License (MIT) +// +// Copyright (c) 2015 Klaus Post +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +// +build amd64,!gccgo + +// func cpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·cpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·xgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/vendor/github.com/minio/sha256-simd/cpuid_arm.go b/vendor/github.com/minio/sha256-simd/cpuid_arm.go new file mode 100644 index 000000000..351dff4b6 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_arm.go @@ -0,0 +1,32 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go new file mode 100644 index 000000000..e739996d9 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_linux_arm64.go @@ -0,0 +1,49 @@ +// +build arm64,linux + +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +import ( + "bytes" + "io/ioutil" +) + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +// File to check for cpu capabilities. +const procCPUInfo = "/proc/cpuinfo" + +// Feature to check for. 
+const sha256Feature = "sha2" + +func haveArmSha() bool { + cpuInfo, err := ioutil.ReadFile(procCPUInfo) + if err != nil { + return false + } + return bytes.Contains(cpuInfo, []byte(sha256Feature)) +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_other.go b/vendor/github.com/minio/sha256-simd/cpuid_other.go new file mode 100644 index 000000000..04f26ce88 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_other.go @@ -0,0 +1,34 @@ +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +build ppc64 ppc64le mips mipsle mips64 mips64le s390x wasm + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go b/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go new file mode 100644 index 000000000..0fb4022f7 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/cpuid_others_arm64.go @@ -0,0 +1,35 @@ +// +build arm64,!linux + +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package sha256 + +func cpuid(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func cpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 +} + +func xgetbv(index uint32) (eax, edx uint32) { + return 0, 0 +} + +// Check for sha2 instruction flag. +func haveArmSha() bool { + return false +} diff --git a/vendor/github.com/minio/sha256-simd/go.mod b/vendor/github.com/minio/sha256-simd/go.mod new file mode 100644 index 000000000..b68fb0a05 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/go.mod @@ -0,0 +1 @@ +module github.com/minio/sha256-simd diff --git a/vendor/github.com/minio/sha256-simd/sha256.go b/vendor/github.com/minio/sha256-simd/sha256.go new file mode 100644 index 000000000..4e1f6d2f7 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256.go @@ -0,0 +1,409 @@ +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +import ( + "crypto/sha256" + "encoding/binary" + "hash" + "runtime" +) + +// Size - The size of a SHA256 checksum in bytes. +const Size = 32 + +// BlockSize - The blocksize of SHA256 in bytes. +const BlockSize = 64 + +const ( + chunk = BlockSize + init0 = 0x6A09E667 + init1 = 0xBB67AE85 + init2 = 0x3C6EF372 + init3 = 0xA54FF53A + init4 = 0x510E527F + init5 = 0x9B05688C + init6 = 0x1F83D9AB + init7 = 0x5BE0CD19 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [8]uint32 + x [chunk]byte + nx int + len uint64 +} + +// Reset digest back to default +func (d *digest) Reset() { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.h[5] = init5 + d.h[6] = init6 + d.h[7] = init7 + d.nx = 0 + d.len = 0 +} + +type blockfuncType int + +const ( + blockfuncGeneric blockfuncType = iota + blockfuncAvx512 blockfuncType = iota + blockfuncAvx2 blockfuncType = iota + blockfuncAvx blockfuncType = iota + blockfuncSsse blockfuncType = iota + blockfuncSha blockfuncType = iota + blockfuncArm blockfuncType = iota +) + +var blockfunc blockfuncType + +func init() { + is386bit := runtime.GOARCH == "386" + isARM := runtime.GOARCH == "arm" + switch { + case is386bit || isARM: + blockfunc = blockfuncGeneric + case sha && ssse3 && sse41: + blockfunc = blockfuncSha + case avx2: + blockfunc = blockfuncAvx2 + case avx: + blockfunc = blockfuncAvx + case ssse3: + blockfunc = blockfuncSsse + case armSha: + blockfunc = blockfuncArm + default: + blockfunc = blockfuncGeneric + } +} + +// New returns a new hash.Hash computing the SHA256 checksum. +func New() hash.Hash { + if blockfunc != blockfuncGeneric { + d := new(digest) + d.Reset() + return d + } + // Fallback to the standard golang implementation + // if no features were found. 
+ return sha256.New() +} + +// Sum256 - single caller sha256 helper +func Sum256(data []byte) (result [Size]byte) { + var d digest + d.Reset() + d.Write(data) + result = d.checkSum() + return +} + +// Return size of checksum +func (d *digest) Size() int { return Size } + +// Return blocksize of checksum +func (d *digest) BlockSize() int { return BlockSize } + +// Write to digest +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Return sha256 sum in bytes +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) +} + +// Intermediate checksum function +func (d *digest) checkSum() (digest [Size]byte) { + n := d.nx + + var k [64]byte + copy(k[:], d.x[:n]) + + k[n] = 0x80 + + if n >= 56 { + block(d, k[:]) + + // clear block buffer - go compiles this to optimal 1x xorps + 4x movups + // unfortunately expressing this more succinctly results in much worse code + k[0] = 0 + k[1] = 0 + k[2] = 0 + k[3] = 0 + k[4] = 0 + k[5] = 0 + k[6] = 0 + k[7] = 0 + k[8] = 0 + k[9] = 0 + k[10] = 0 + k[11] = 0 + k[12] = 0 + k[13] = 0 + k[14] = 0 + k[15] = 0 + k[16] = 0 + k[17] = 0 + k[18] = 0 + k[19] = 0 + k[20] = 0 + k[21] = 0 + k[22] = 0 + k[23] = 0 + k[24] = 0 + k[25] = 0 + k[26] = 0 + k[27] = 0 + k[28] = 0 + k[29] = 0 + k[30] = 0 + k[31] = 0 + k[32] = 0 + k[33] = 0 + k[34] = 0 + k[35] = 0 + k[36] = 0 + k[37] = 0 + k[38] = 0 + k[39] = 0 + k[40] = 0 + k[41] = 0 + k[42] = 0 + k[43] = 0 + k[44] = 0 + k[45] = 0 + k[46] = 0 + k[47] = 0 + k[48] = 0 + k[49] = 0 + k[50] = 0 + k[51] = 0 + k[52] = 0 + k[53] = 0 + k[54] = 0 + k[55] = 0 + k[56] = 0 + k[57] = 0 + k[58] = 
0 + k[59] = 0 + k[60] = 0 + k[61] = 0 + k[62] = 0 + k[63] = 0 + } + binary.BigEndian.PutUint64(k[56:64], uint64(d.len)<<3) + block(d, k[:]) + + { + const i = 0 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 1 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 2 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 3 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 4 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 5 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 6 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + { + const i = 7 + binary.BigEndian.PutUint32(digest[i*4:i*4+4], d.h[i]) + } + + return +} + +func block(dig *digest, p []byte) { + if blockfunc == blockfuncSha { + blockShaGo(dig, p) + } else if blockfunc == blockfuncAvx2 { + blockAvx2Go(dig, p) + } else if blockfunc == blockfuncAvx { + blockAvxGo(dig, p) + } else if blockfunc == blockfuncSsse { + blockSsseGo(dig, p) + } else if blockfunc == blockfuncArm { + blockArmGo(dig, p) + } else if blockfunc == blockfuncGeneric { + blockGeneric(dig, p) + } +} + +func blockGeneric(dig *digest, p []byte) { + var w [64]uint32 + h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. 
+ for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + for i := 16; i < 64; i++ { + v1 := w[i-2] + t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10) + v2 := w[i-15] + t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3) + w[i] = t1 + w[i-7] + t2 + w[i-16] + } + + a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7 + + for i := 0; i < 64; i++ { + t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i] + + t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c)) + + h = g + g = f + f = e + e = d + t1 + d = c + c = b + b = a + a = t1 + t2 + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + h5 += f + h6 += g + h7 += h + + p = p[chunk:] + } + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7 +} + +var _K = []uint32{ + 0x428a2f98, + 0x71374491, + 0xb5c0fbcf, + 0xe9b5dba5, + 0x3956c25b, + 0x59f111f1, + 0x923f82a4, + 0xab1c5ed5, + 0xd807aa98, + 0x12835b01, + 0x243185be, + 0x550c7dc3, + 0x72be5d74, + 0x80deb1fe, + 0x9bdc06a7, + 0xc19bf174, + 0xe49b69c1, + 0xefbe4786, + 0x0fc19dc6, + 0x240ca1cc, + 0x2de92c6f, + 0x4a7484aa, + 0x5cb0a9dc, + 0x76f988da, + 0x983e5152, + 0xa831c66d, + 0xb00327c8, + 0xbf597fc7, + 0xc6e00bf3, + 0xd5a79147, + 0x06ca6351, + 0x14292967, + 0x27b70a85, + 0x2e1b2138, + 0x4d2c6dfc, + 0x53380d13, + 0x650a7354, + 0x766a0abb, + 0x81c2c92e, + 0x92722c85, + 0xa2bfe8a1, + 0xa81a664b, + 0xc24b8b70, + 0xc76c51a3, + 0xd192e819, + 0xd6990624, + 0xf40e3585, + 0x106aa070, + 0x19a4c116, + 0x1e376c08, + 0x2748774c, + 0x34b0bcb5, + 0x391c0cb3, + 0x4ed8aa4a, + 0x5b9cca4f, + 0x682e6ff3, + 0x748f82ee, + 0x78a5636f, + 0x84c87814, + 0x8cc70208, + 0x90befffa, + 0xa4506ceb, + 0xbef9a3f7, + 0xc67178f2, +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go 
b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go new file mode 100644 index 000000000..43ee7a948 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockAvx2(h []uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s new file mode 100644 index 000000000..80b0b739b --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx2_amd64.s @@ -0,0 +1,1449 @@ +//+build !noasm,!appengine + +// SHA256 implementation for AVX2 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +DATA K256<>+0x000(SB)/8, $0x71374491428a2f98 +DATA K256<>+0x008(SB)/8, $0xe9b5dba5b5c0fbcf +DATA K256<>+0x010(SB)/8, $0x71374491428a2f98 +DATA K256<>+0x018(SB)/8, $0xe9b5dba5b5c0fbcf +DATA K256<>+0x020(SB)/8, $0x59f111f13956c25b +DATA K256<>+0x028(SB)/8, $0xab1c5ed5923f82a4 +DATA K256<>+0x030(SB)/8, $0x59f111f13956c25b +DATA K256<>+0x038(SB)/8, $0xab1c5ed5923f82a4 +DATA K256<>+0x040(SB)/8, $0x12835b01d807aa98 +DATA K256<>+0x048(SB)/8, $0x550c7dc3243185be +DATA K256<>+0x050(SB)/8, $0x12835b01d807aa98 +DATA K256<>+0x058(SB)/8, $0x550c7dc3243185be +DATA K256<>+0x060(SB)/8, $0x80deb1fe72be5d74 +DATA K256<>+0x068(SB)/8, $0xc19bf1749bdc06a7 +DATA K256<>+0x070(SB)/8, $0x80deb1fe72be5d74 +DATA K256<>+0x078(SB)/8, $0xc19bf1749bdc06a7 +DATA K256<>+0x080(SB)/8, $0xefbe4786e49b69c1 +DATA K256<>+0x088(SB)/8, $0x240ca1cc0fc19dc6 +DATA K256<>+0x090(SB)/8, $0xefbe4786e49b69c1 +DATA K256<>+0x098(SB)/8, $0x240ca1cc0fc19dc6 +DATA K256<>+0x0a0(SB)/8, $0x4a7484aa2de92c6f +DATA K256<>+0x0a8(SB)/8, $0x76f988da5cb0a9dc +DATA K256<>+0x0b0(SB)/8, $0x4a7484aa2de92c6f +DATA K256<>+0x0b8(SB)/8, $0x76f988da5cb0a9dc +DATA K256<>+0x0c0(SB)/8, $0xa831c66d983e5152 +DATA K256<>+0x0c8(SB)/8, $0xbf597fc7b00327c8 +DATA K256<>+0x0d0(SB)/8, $0xa831c66d983e5152 +DATA K256<>+0x0d8(SB)/8, $0xbf597fc7b00327c8 +DATA K256<>+0x0e0(SB)/8, $0xd5a79147c6e00bf3 +DATA K256<>+0x0e8(SB)/8, $0x1429296706ca6351 +DATA K256<>+0x0f0(SB)/8, $0xd5a79147c6e00bf3 +DATA K256<>+0x0f8(SB)/8, $0x1429296706ca6351 +DATA K256<>+0x100(SB)/8, $0x2e1b213827b70a85 +DATA K256<>+0x108(SB)/8, $0x53380d134d2c6dfc +DATA 
K256<>+0x110(SB)/8, $0x2e1b213827b70a85 +DATA K256<>+0x118(SB)/8, $0x53380d134d2c6dfc +DATA K256<>+0x120(SB)/8, $0x766a0abb650a7354 +DATA K256<>+0x128(SB)/8, $0x92722c8581c2c92e +DATA K256<>+0x130(SB)/8, $0x766a0abb650a7354 +DATA K256<>+0x138(SB)/8, $0x92722c8581c2c92e +DATA K256<>+0x140(SB)/8, $0xa81a664ba2bfe8a1 +DATA K256<>+0x148(SB)/8, $0xc76c51a3c24b8b70 +DATA K256<>+0x150(SB)/8, $0xa81a664ba2bfe8a1 +DATA K256<>+0x158(SB)/8, $0xc76c51a3c24b8b70 +DATA K256<>+0x160(SB)/8, $0xd6990624d192e819 +DATA K256<>+0x168(SB)/8, $0x106aa070f40e3585 +DATA K256<>+0x170(SB)/8, $0xd6990624d192e819 +DATA K256<>+0x178(SB)/8, $0x106aa070f40e3585 +DATA K256<>+0x180(SB)/8, $0x1e376c0819a4c116 +DATA K256<>+0x188(SB)/8, $0x34b0bcb52748774c +DATA K256<>+0x190(SB)/8, $0x1e376c0819a4c116 +DATA K256<>+0x198(SB)/8, $0x34b0bcb52748774c +DATA K256<>+0x1a0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA K256<>+0x1a8(SB)/8, $0x682e6ff35b9cca4f +DATA K256<>+0x1b0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA K256<>+0x1b8(SB)/8, $0x682e6ff35b9cca4f +DATA K256<>+0x1c0(SB)/8, $0x78a5636f748f82ee +DATA K256<>+0x1c8(SB)/8, $0x8cc7020884c87814 +DATA K256<>+0x1d0(SB)/8, $0x78a5636f748f82ee +DATA K256<>+0x1d8(SB)/8, $0x8cc7020884c87814 +DATA K256<>+0x1e0(SB)/8, $0xa4506ceb90befffa +DATA K256<>+0x1e8(SB)/8, $0xc67178f2bef9a3f7 +DATA K256<>+0x1f0(SB)/8, $0xa4506ceb90befffa +DATA K256<>+0x1f8(SB)/8, $0xc67178f2bef9a3f7 + +DATA K256<>+0x200(SB)/8, $0x0405060700010203 +DATA K256<>+0x208(SB)/8, $0x0c0d0e0f08090a0b +DATA K256<>+0x210(SB)/8, $0x0405060700010203 +DATA K256<>+0x218(SB)/8, $0x0c0d0e0f08090a0b +DATA K256<>+0x220(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x228(SB)/8, $0xffffffffffffffff +DATA K256<>+0x230(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x238(SB)/8, $0xffffffffffffffff +DATA K256<>+0x240(SB)/8, $0xffffffffffffffff +DATA K256<>+0x248(SB)/8, $0x0b0a090803020100 +DATA K256<>+0x250(SB)/8, $0xffffffffffffffff +DATA K256<>+0x258(SB)/8, $0x0b0a090803020100 + +GLOBL K256<>(SB), 8, $608 + +// We need 0x220 stack space aligned on 
a 512 boundary, so for the +// worstcase-aligned SP we need twice this amount, being 1088 (=0x440) +// +// SP aligned end-aligned stacksize +// 100013d0 10001400 10001620 592 +// 100013d8 10001400 10001620 584 +// 100013e0 10001600 10001820 1088 +// 100013e8 10001600 10001820 1080 + +// func blockAvx2(h []uint32, message []uint8) +TEXT ·blockAvx2(SB),$1088-48 + + MOVQ h+0(FP), DI // DI: &h + MOVQ message_base+24(FP), SI // SI: &message + MOVQ message_len+32(FP), DX // len(message) + ADDQ SI, DX // end pointer of input + MOVQ SP, R11 // copy stack pointer + ADDQ $0x220, SP // sp += 0x220 + ANDQ $0xfffffffffffffe00, SP // align stack frame + ADDQ $0x1c0, SP + MOVQ DI, 0x40(SP) // save ctx + MOVQ SI, 0x48(SP) // save input + MOVQ DX, 0x50(SP) // save end pointer + MOVQ R11, 0x58(SP) // save copy of stack pointer + + WORD $0xf8c5; BYTE $0x77 // vzeroupper + ADDQ $0x40, SI // input++ + MOVL (DI), AX + MOVQ SI, R12 // borrow $T1 + MOVL 4(DI), BX + CMPQ SI, DX // $_end + MOVL 8(DI), CX + LONG $0xe4440f4c // cmove r12,rsp /* next block or random data */ + MOVL 12(DI), DX + MOVL 16(DI), R8 + MOVL 20(DI), R9 + MOVL 24(DI), R10 + MOVL 28(DI), R11 + + LEAQ K256<>(SB), BP + LONG $0x856f7dc5; LONG $0x00000220 // VMOVDQA YMM8, 0x220[rbp] /* vmovdqa ymm8,YMMWORD PTR [rip+0x220] */ + LONG $0x8d6f7dc5; LONG $0x00000240 // VMOVDQA YMM9, 0x240[rbp] /* vmovdqa ymm9,YMMWORD PTR [rip+0x240] */ + LONG $0x956f7dc5; LONG $0x00000200 // VMOVDQA YMM10, 0x200[rbp] /* vmovdqa ymm7,YMMWORD PTR [rip+0x200] */ + +loop0: + LONG $0x6f7dc1c4; BYTE $0xfa // VMOVDQA YMM7, YMM10 + + // Load first 16 dwords from two blocks + MOVOU -64(SI), X0 // vmovdqu xmm0,XMMWORD PTR [rsi-0x40] + MOVOU -48(SI), X1 // vmovdqu xmm1,XMMWORD PTR [rsi-0x30] + MOVOU -32(SI), X2 // vmovdqu xmm2,XMMWORD PTR [rsi-0x20] + MOVOU -16(SI), X3 // vmovdqu xmm3,XMMWORD PTR [rsi-0x10] + + // Byte swap data and transpose data into high/low + LONG $0x387dc3c4; WORD $0x2404; BYTE $0x01 // vinserti128 ymm0,ymm0,[r12],0x1 + LONG 
$0x3875c3c4; LONG $0x0110244c // vinserti128 ymm1,ymm1,0x10[r12],0x1 + LONG $0x007de2c4; BYTE $0xc7 // vpshufb ymm0,ymm0,ymm7 + LONG $0x386dc3c4; LONG $0x01202454 // vinserti128 ymm2,ymm2,0x20[r12],0x1 + LONG $0x0075e2c4; BYTE $0xcf // vpshufb ymm1,ymm1,ymm7 + LONG $0x3865c3c4; LONG $0x0130245c // vinserti128 ymm3,ymm3,0x30[r12],0x1 + + LEAQ K256<>(SB), BP + LONG $0x006de2c4; BYTE $0xd7 // vpshufb ymm2,ymm2,ymm7 + LONG $0x65fefdc5; BYTE $0x00 // vpaddd ymm4,ymm0,[rbp] + LONG $0x0065e2c4; BYTE $0xdf // vpshufb ymm3,ymm3,ymm7 + LONG $0x6dfef5c5; BYTE $0x20 // vpaddd ymm5,ymm1,0x20[rbp] + LONG $0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,0x40[rbp] + LONG $0x7dfee5c5; BYTE $0x60 // vpaddd ymm7,ymm3,0x60[rbp] + + LONG $0x247ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm4 + XORQ R14, R14 + LONG $0x6c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm5 + + ADDQ $-0x40, SP + MOVQ BX, DI + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + XORQ CX, DI // magic + LONG $0x7c7ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm7 + MOVQ R9, R12 + ADDQ $0x80, BP + +loop1: + // Schedule 48 input dwords, by doing 3 rounds of 12 each + // Note: SIMD instructions are interleaved with the SHA calculations + ADDQ $-0x40, SP + LONG $0x0f75e3c4; WORD $0x04e0 // vpalignr ymm4,ymm1,ymm0,0x4 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) + LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0x0f65e3c4; WORD $0x04fa // vpalignr ymm7,ymm3,ymm2,0x4 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0xc7fefdc5 // vpaddd ymm0,ymm0,ymm7 + LONG $0x231c8d47 // lea 
r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + LONG $0xfb70fdc5; BYTE $0xfa // vpshufd ymm7,ymm3,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) + LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG 
$0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + LONG $0xc4fefdc5 // vpaddd ymm0,ymm0,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) + LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf870fdc5; BYTE $0x50 // vpshufd ymm7,ymm0,0x50 + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 
// lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) + LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xc6fefdc5 // vpaddd ymm0,ymm0,ymm6 + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0x75fefdc5; BYTE $0x00 // vpaddd ymm6,ymm0,[rbp+0x0] + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + LONG $0x0f6de3c4; WORD $0x04e1 // vpalignr ymm4,ymm2,ymm1,0x4 + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) + LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0x0f7de3c4; WORD $0x04fb // vpalignr ymm7,ymm0,ymm3,0x4 + LONG 
$0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0xcffef5c5 // vpaddd ymm1,ymm1,ymm7 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + LONG $0xf870fdc5; BYTE $0xfa // vpshufd ymm7,ymm0,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) + LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0xe6efddc5 // vpxor 
ymm4,ymm4,ymm6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + LONG $0xccfef5c5 // vpaddd ymm1,ymm1,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) + LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf970fdc5; BYTE $0x50 // vpshufd ymm7,ymm1,0x50 + LONG 
$0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) + LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xcefef5c5 // vpaddd ymm1,ymm1,ymm6 + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0x75fef5c5; BYTE $0x20 // vpaddd ymm6,ymm1,[rbp+0x20] + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + 
LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 + + LONG $0x24648d48; BYTE $0xc0 // lea rsp,[rsp-0x40] + LONG $0x0f65e3c4; WORD $0x04e2 // vpalignr ymm4,ymm3,ymm2,0x4 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x80) + LONG $0x249c0344; LONG $0x00000080 // add r11d,[rsp+0x80] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0x0f75e3c4; WORD $0x04f8 // vpalignr ymm7,ymm1,ymm0,0x4 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0xd7feedc5 // vpaddd ymm2,ymm2,ymm7 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + LONG $0xf970fdc5; BYTE $0xfa // vpshufd ymm7,ymm1,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x84) + LONG $0x24940344; LONG $0x00000084 // add r10d,[rsp+0x84] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG 
$0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + LONG $0xd4feedc5 // vpaddd ymm2,ymm2,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x88) + LONG $0x248c0344; LONG $0x00000088 // add r9d,[rsp+0x88] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx 
r14d,ecx,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xfa70fdc5; BYTE $0x50 // vpshufd ymm7,ymm2,0x50 + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x8c) + LONG $0x24840344; LONG $0x0000008c // add r8d,[rsp+0x8c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xd6feedc5 // vpaddd ymm2,ymm2,ymm6 + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG 
$0x75feedc5; BYTE $0x40 // vpaddd ymm6,ymm2,[rbp+0x40] + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + LONG $0x347ffdc5; BYTE $0x24 // vmovdqa [rsp],ymm6 + LONG $0x0f7de3c4; WORD $0x04e3 // vpalignr ymm4,ymm0,ymm3,0x4 + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0xa0) + LONG $0xa0249403; WORD $0x0000; BYTE $0x00 // add edx,[rsp+0xa0] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0x0f6de3c4; WORD $0x04f9 // vpalignr ymm7,ymm2,ymm1,0x4 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xd472cdc5; BYTE $0x07 // vpsrld ymm6,ymm4,0x7 + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0xdffee5c5 // vpaddd ymm3,ymm3,ymm7 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xd472c5c5; BYTE $0x03 // vpsrld ymm7,ymm4,0x3 + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf472d5c5; BYTE $0x0e // vpslld ymm5,ymm4,0xe + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + LONG $0xe6efc5c5 // vpxor ymm4,ymm7,ymm6 + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + LONG $0xfa70fdc5; 
BYTE $0xfa // vpshufd ymm7,ymm2,0xfa + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + LONG $0xd672cdc5; BYTE $0x0b // vpsrld ymm6,ymm6,0xb + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0xa4) + LONG $0xa4248c03; WORD $0x0000; BYTE $0x00 // add ecx,[rsp+0xa4] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf572d5c5; BYTE $0x0b // vpslld ymm5,ymm5,0xb + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0xe6efddc5 // vpxor ymm4,ymm4,ymm6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xe5efddc5 // vpxor ymm4,ymm4,ymm5 + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + LONG $0xdcfee5c5 // vpaddd ymm3,ymm3,ymm4 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0xa8) + LONG $0xa8249c03; WORD $0x0000; BYTE $0x00 // add ebx,[rsp+0xa8] + WORD $0x2145; BYTE $0xd4 // and 
r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x004dc2c4; BYTE $0xf0 // vpshufb ymm6,ymm6,ymm8 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xfb70fdc5; BYTE $0x50 // vpshufd ymm7,ymm3,0x50 + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + LONG $0xd772cdc5; BYTE $0x0a // vpsrld ymm6,ymm7,0xa + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + LONG $0xd773c5c5; BYTE $0x11 // vpsrlq ymm7,ymm7,0x11 + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0xac) + LONG $0xac248403; WORD $0x0000; BYTE $0x00 // add eax,[rsp+0xac] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xd773c5c5; BYTE $0x02 // vpsrlq ymm7,ymm7,0x2 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf7efcdc5 // vpxor ymm6,ymm6,ymm7 + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE 
$0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x004dc2c4; BYTE $0xf1 // vpshufb ymm6,ymm6,ymm9 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xdefee5c5 // vpaddd ymm3,ymm3,ymm6 + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0x75fee5c5; BYTE $0x60 // vpaddd ymm6,ymm3,[rbp+0x60] + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + LONG $0x747ffdc5; WORD $0x2024 // vmovdqa [rsp+0x20],ymm6 + ADDQ $0x80, BP + + CMPB 0x3(BP), $0x0 + JNE loop1 + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x40) + LONG $0x245c0344; BYTE $0x40 // add r11d,[rsp+0x40] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD 
$0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x44) + LONG $0x24540344; BYTE $0x44 // add r10d,[rsp+0x44] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x48) + LONG $0x244c0344; BYTE $0x48 // add r9d,[rsp+0x48] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + 
WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x4c) + LONG $0x24440344; BYTE $0x4c // add r8d,[rsp+0x4c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x60) + LONG $0x60245403 // add edx,[rsp+0x60] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx 
r13d,eax,0x19 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x64) + LONG $0x64244c03 // add ecx,[rsp+0x64] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD 
$0x3145; BYTE $0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x68) + LONG $0x68245c03 // add ebx,[rsp+0x68] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x6c) + LONG $0x6c244403 // add eax,[rsp+0x6c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; WORD 
$0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, SP, 0x00) + LONG $0x241c0344 // add r11d,[rsp] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, SP, 0x04) + LONG $0x24540344; BYTE $0x04 // add r10d,[rsp+0x4] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea 
r11d,[r11+r14*1] + LONG $0x22148d47 // lea r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, SP, 0x08) + LONG $0x244c0344; BYTE $0x08 // add r9d,[rsp+0x8] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + 
LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, SP, 0x0c) + LONG $0x24440344; BYTE $0x0c // add r8d,[rsp+0xc] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, SP, 0x20) + LONG $0x20245403 // add edx,[rsp+0x20] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea 
edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG $0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, SP, 0x24) + LONG $0x24244c03 // add ecx,[rsp+0x24] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, SP, 0x28) + LONG $0x28245c03 // add ebx,[rsp+0x28] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea 
ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, SP, 0x2c) + LONG $0x2c244403 // add eax,[rsp+0x2c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d 
+ + MOVQ 0x200(SP), DI // $_ctx + ADDQ R14, AX + + LEAQ 0x1c0(SP), BP + + ADDL (DI), AX + ADDL 4(DI), BX + ADDL 8(DI), CX + ADDL 12(DI), DX + ADDL 16(DI), R8 + ADDL 20(DI), R9 + ADDL 24(DI), R10 + ADDL 28(DI), R11 + + MOVL AX, (DI) + MOVL BX, 4(DI) + MOVL CX, 8(DI) + MOVL DX, 12(DI) + MOVL R8, 16(DI) + MOVL R9, 20(DI) + MOVL R10, 24(DI) + MOVL R11, 28(DI) + + CMPQ SI, 0x50(BP) // $_end + JE done + + XORQ R14, R14 + MOVQ BX, DI + XORQ CX, DI // magic + MOVQ R9, R12 + +loop2: + // ROUND(AX, BX, CX, DX, R8, R9, R10, R11, R12, R13, R14, R15, DI, BP, 0x10) + LONG $0x105d0344 // add r11d,[rbp+0x10] + WORD $0x2145; BYTE $0xc4 // and r12d,r8d + LONG $0xf07b43c4; WORD $0x19e8 // rorx r13d,r8d,0x19 + LONG $0xf07b43c4; WORD $0x0bf8 // rorx r15d,r8d,0xb + LONG $0x30048d42 // lea eax,[rax+r14*1] + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + LONG $0xf23842c4; BYTE $0xe2 // andn r12d,r8d,r10d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f0 // rorx r14d,r8d,0x6 + LONG $0x231c8d47 // lea r11d,[r11+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xc7 // mov r15d,eax + LONG $0xf07b63c4; WORD $0x16e0 // rorx r12d,eax,0x16 + LONG $0x2b1c8d47 // lea r11d,[r11+r13*1] + WORD $0x3141; BYTE $0xdf // xor r15d,ebx + LONG $0xf07b63c4; WORD $0x0df0 // rorx r14d,eax,0xd + LONG $0xf07b63c4; WORD $0x02e8 // rorx r13d,eax,0x2 + LONG $0x1a148d42 // lea edx,[rdx+r11*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xdf31 // xor edi,ebx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3b1c8d45 // lea r11d,[r11+rdi*1] + WORD $0x8945; BYTE $0xc4 // mov r12d,r8d + + // ROUND(R11, AX, BX, CX, DX, R8, R9, R10, R12, R13, R14, DI, R15, BP, 0x14) + LONG $0x14550344 // add r10d,[rbp+0x14] + WORD $0x2141; BYTE $0xd4 // and r12d,edx + LONG $0xf07b63c4; WORD $0x19ea // rorx r13d,edx,0x19 + LONG $0xf07be3c4; WORD $0x0bfa // rorx edi,edx,0xb + LONG $0x331c8d47 // lea r11d,[r11+r14*1] + LONG $0x22148d47 // lea 
r10d,[r10+r12*1] + LONG $0xf26842c4; BYTE $0xe1 // andn r12d,edx,r9d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f2 // rorx r14d,edx,0x6 + LONG $0x22148d47 // lea r10d,[r10+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xdf // mov edi,r11d + LONG $0xf07b43c4; WORD $0x16e3 // rorx r12d,r11d,0x16 + LONG $0x2a148d47 // lea r10d,[r10+r13*1] + WORD $0xc731 // xor edi,eax + LONG $0xf07b43c4; WORD $0x0df3 // rorx r14d,r11d,0xd + LONG $0xf07b43c4; WORD $0x02eb // rorx r13d,r11d,0x2 + LONG $0x110c8d42 // lea ecx,[rcx+r10*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xc7 // xor r15d,eax + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x3a148d47 // lea r10d,[r10+r15*1] + WORD $0x8941; BYTE $0xd4 // mov r12d,edx + + // ROUND(R10, R11, AX, BX, CX, DX, R8, R9, R12, R13, R14, R15, DI, BP, 0x18) + LONG $0x184d0344 // add r9d,[rbp+0x18] + WORD $0x2141; BYTE $0xcc // and r12d,ecx + LONG $0xf07b63c4; WORD $0x19e9 // rorx r13d,ecx,0x19 + LONG $0xf07b63c4; WORD $0x0bf9 // rorx r15d,ecx,0xb + LONG $0x32148d47 // lea r10d,[r10+r14*1] + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + LONG $0xf27042c4; BYTE $0xe0 // andn r12d,ecx,r8d + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f1 // rorx r14d,ecx,0x6 + LONG $0x210c8d47 // lea r9d,[r9+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xd7 // mov r15d,r10d + LONG $0xf07b43c4; WORD $0x16e2 // rorx r12d,r10d,0x16 + LONG $0x290c8d47 // lea r9d,[r9+r13*1] + WORD $0x3145; BYTE $0xdf // xor r15d,r11d + LONG $0xf07b43c4; WORD $0x0df2 // rorx r14d,r10d,0xd + LONG $0xf07b43c4; WORD $0x02ea // rorx r13d,r10d,0x2 + LONG $0x0b1c8d42 // lea ebx,[rbx+r9*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xdf // xor edi,r11d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d45 // lea r9d,[r9+rdi*1] + WORD $0x8941; 
BYTE $0xcc // mov r12d,ecx + + // ROUND(R9, R10, R11, AX, BX, CX, DX, R8, R12, R13, R14, DI, R15, BP, 0x1c) + LONG $0x1c450344 // add r8d,[rbp+0x1c] + WORD $0x2141; BYTE $0xdc // and r12d,ebx + LONG $0xf07b63c4; WORD $0x19eb // rorx r13d,ebx,0x19 + LONG $0xf07be3c4; WORD $0x0bfb // rorx edi,ebx,0xb + LONG $0x310c8d47 // lea r9d,[r9+r14*1] + LONG $0x20048d47 // lea r8d,[r8+r12*1] + LONG $0xf26062c4; BYTE $0xe2 // andn r12d,ebx,edx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b63c4; WORD $0x06f3 // rorx r14d,ebx,0x6 + LONG $0x20048d47 // lea r8d,[r8+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8944; BYTE $0xcf // mov edi,r9d + LONG $0xf07b43c4; WORD $0x16e1 // rorx r12d,r9d,0x16 + LONG $0x28048d47 // lea r8d,[r8+r13*1] + WORD $0x3144; BYTE $0xd7 // xor edi,r10d + LONG $0xf07b43c4; WORD $0x0df1 // rorx r14d,r9d,0xd + LONG $0xf07b43c4; WORD $0x02e9 // rorx r13d,r9d,0x2 + LONG $0x00048d42 // lea eax,[rax+r8*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xd7 // xor r15d,r10d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d47 // lea r8d,[r8+r15*1] + WORD $0x8941; BYTE $0xdc // mov r12d,ebx + + // ROUND(R8, R9, R10, R11, AX, BX, CX, DX, R12, R13, R14, R15, DI, BP, 0x30) + WORD $0x5503; BYTE $0x30 // add edx,[rbp+0x30] + WORD $0x2141; BYTE $0xc4 // and r12d,eax + LONG $0xf07b63c4; WORD $0x19e8 // rorx r13d,eax,0x19 + LONG $0xf07b63c4; WORD $0x0bf8 // rorx r15d,eax,0xb + LONG $0x30048d47 // lea r8d,[r8+r14*1] + LONG $0x22148d42 // lea edx,[rdx+r12*1] + LONG $0xf27862c4; BYTE $0xe1 // andn r12d,eax,ecx + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b63c4; WORD $0x06f0 // rorx r14d,eax,0x6 + LONG $0x22148d42 // lea edx,[rdx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8945; BYTE $0xc7 // mov r15d,r8d + LONG $0xf07b43c4; WORD $0x16e0 // rorx r12d,r8d,0x16 + LONG $0x2a148d42 // lea edx,[rdx+r13*1] + WORD $0x3145; BYTE $0xcf // xor r15d,r9d + LONG 
$0xf07b43c4; WORD $0x0df0 // rorx r14d,r8d,0xd + LONG $0xf07b43c4; WORD $0x02e8 // rorx r13d,r8d,0x2 + LONG $0x131c8d45 // lea r11d,[r11+rdx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3144; BYTE $0xcf // xor edi,r9d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x148d; BYTE $0x3a // lea edx,[rdx+rdi*1] + WORD $0x8941; BYTE $0xc4 // mov r12d,eax + + // ROUND(DX, R8, R9, R10, R11, AX, BX, CX, R12, R13, R14, DI, R15, BP, 0x34) + WORD $0x4d03; BYTE $0x34 // add ecx,[rbp+0x34] + WORD $0x2145; BYTE $0xdc // and r12d,r11d + LONG $0xf07b43c4; WORD $0x19eb // rorx r13d,r11d,0x19 + LONG $0xf07bc3c4; WORD $0x0bfb // rorx edi,r11d,0xb + LONG $0x32148d42 // lea edx,[rdx+r14*1] + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + LONG $0xf22062c4; BYTE $0xe3 // andn r12d,r11d,ebx + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f3 // rorx r14d,r11d,0x6 + LONG $0x210c8d42 // lea ecx,[rcx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xd789 // mov edi,edx + LONG $0xf07b63c4; WORD $0x16e2 // rorx r12d,edx,0x16 + LONG $0x290c8d42 // lea ecx,[rcx+r13*1] + WORD $0x3144; BYTE $0xc7 // xor edi,r8d + LONG $0xf07b63c4; WORD $0x0df2 // rorx r14d,edx,0xd + LONG $0xf07b63c4; WORD $0x02ea // rorx r13d,edx,0x2 + LONG $0x0a148d45 // lea r10d,[r10+rcx*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3145; BYTE $0xc7 // xor r15d,r8d + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x390c8d42 // lea ecx,[rcx+r15*1] + WORD $0x8945; BYTE $0xdc // mov r12d,r11d + + // ROUND(CX, DX, R8, R9, R10, R11, AX, BX, R12, R13, R14, R15, DI, BP, 0x38) + WORD $0x5d03; BYTE $0x38 // add ebx,[rbp+0x38] + WORD $0x2145; BYTE $0xd4 // and r12d,r10d + LONG $0xf07b43c4; WORD $0x19ea // rorx r13d,r10d,0x19 + LONG $0xf07b43c4; WORD $0x0bfa // rorx r15d,r10d,0xb + LONG $0x310c8d42 // lea ecx,[rcx+r14*1] + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + LONG $0xf22862c4; BYTE $0xe0 // 
andn r12d,r10d,eax + WORD $0x3145; BYTE $0xfd // xor r13d,r15d + LONG $0xf07b43c4; WORD $0x06f2 // rorx r14d,r10d,0x6 + LONG $0x231c8d42 // lea ebx,[rbx+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0x8941; BYTE $0xcf // mov r15d,ecx + LONG $0xf07b63c4; WORD $0x16e1 // rorx r12d,ecx,0x16 + LONG $0x2b1c8d42 // lea ebx,[rbx+r13*1] + WORD $0x3141; BYTE $0xd7 // xor r15d,edx + LONG $0xf07b63c4; WORD $0x0df1 // rorx r14d,ecx,0xd + LONG $0xf07b63c4; WORD $0x02e9 // rorx r13d,ecx,0x2 + LONG $0x190c8d45 // lea r9d,[r9+rbx*1] + WORD $0x2144; BYTE $0xff // and edi,r15d + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0xd731 // xor edi,edx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + WORD $0x1c8d; BYTE $0x3b // lea ebx,[rbx+rdi*1] + WORD $0x8945; BYTE $0xd4 // mov r12d,r10d + + // ROUND(BX, CX, DX, R8, R9, R10, R11, AX, R12, R13, R14, DI, R15, BP, 0x3c) + WORD $0x4503; BYTE $0x3c // add eax,[rbp+0x3c] + WORD $0x2145; BYTE $0xcc // and r12d,r9d + LONG $0xf07b43c4; WORD $0x19e9 // rorx r13d,r9d,0x19 + LONG $0xf07bc3c4; WORD $0x0bf9 // rorx edi,r9d,0xb + LONG $0x331c8d42 // lea ebx,[rbx+r14*1] + LONG $0x20048d42 // lea eax,[rax+r12*1] + LONG $0xf23042c4; BYTE $0xe3 // andn r12d,r9d,r11d + WORD $0x3141; BYTE $0xfd // xor r13d,edi + LONG $0xf07b43c4; WORD $0x06f1 // rorx r14d,r9d,0x6 + LONG $0x20048d42 // lea eax,[rax+r12*1] + WORD $0x3145; BYTE $0xf5 // xor r13d,r14d + WORD $0xdf89 // mov edi,ebx + LONG $0xf07b63c4; WORD $0x16e3 // rorx r12d,ebx,0x16 + LONG $0x28048d42 // lea eax,[rax+r13*1] + WORD $0xcf31 // xor edi,ecx + LONG $0xf07b63c4; WORD $0x0df3 // rorx r14d,ebx,0xd + LONG $0xf07b63c4; WORD $0x02eb // rorx r13d,ebx,0x2 + LONG $0x00048d45 // lea r8d,[r8+rax*1] + WORD $0x2141; BYTE $0xff // and r15d,edi + WORD $0x3145; BYTE $0xe6 // xor r14d,r12d + WORD $0x3141; BYTE $0xcf // xor r15d,ecx + WORD $0x3145; BYTE $0xee // xor r14d,r13d + LONG $0x38048d42 // lea eax,[rax+r15*1] + WORD $0x8945; BYTE $0xcc // mov r12d,r9d + + ADDQ $-0x40, BP + CMPQ BP, SP + JAE 
loop2 + + MOVQ 0x200(SP), DI // $_ctx + ADDQ R14, AX + + ADDQ $0x1c0, SP + + ADDL (DI), AX + ADDL 4(DI), BX + ADDL 8(DI), CX + ADDL 12(DI), DX + ADDL 16(DI), R8 + ADDL 20(DI), R9 + + ADDQ $0x80, SI // input += 2 + ADDL 24(DI), R10 + MOVQ SI, R12 + ADDL 28(DI), R11 + CMPQ SI, 0x50(SP) // input == _end + + MOVL AX, (DI) + LONG $0xe4440f4c // cmove r12,rsp /* next block or stale data */ + MOVL AX, (DI) + MOVL BX, 4(DI) + MOVL CX, 8(DI) + MOVL DX, 12(DI) + MOVL R8, 16(DI) + MOVL R9, 20(DI) + MOVL R10, 24(DI) + MOVL R11, 28(DI) + + JBE loop0 + LEAQ (SP), BP + +done: + MOVQ BP, SP + MOVQ 0x58(SP), SP // restore saved stack pointer + WORD $0xf8c5; BYTE $0x77 // vzeroupper + + RET + diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm new file mode 100644 index 000000000..c959b1aa2 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.asm @@ -0,0 +1,686 @@ + +// 16x Parallel implementation of SHA256 for AVX512 + +// +// Minio Cloud Storage, (C) 2017 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +// This code is based on the Intel Multi-Buffer Crypto for IPSec library +// and more specifically the following implementation: +// https://github.com/intel/intel-ipsec-mb/blob/master/avx512/sha256_x16_avx512.asm +// +// For Golang it has been converted into Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble the AVX512 instructions +// + +// Copyright (c) 2017, Intel Corporation +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of Intel Corporation nor the names of its contributors +// may be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#define SHA256_DIGEST_ROW_SIZE 64 + +// arg1 +#define STATE rdi +#define STATE_P9 DI +// arg2 +#define INP_SIZE rsi +#define INP_SIZE_P9 SI + +#define IDX rcx +#define TBL rdx +#define TBL_P9 DX + +#define INPUT rax +#define INPUT_P9 AX + +#define inp0 r9 +#define SCRATCH_P9 R12 +#define SCRATCH r12 +#define maskp r13 +#define MASKP_P9 R13 +#define mask r14 +#define MASK_P9 R14 + +#define A zmm0 +#define B zmm1 +#define C zmm2 +#define D zmm3 +#define E zmm4 +#define F zmm5 +#define G zmm6 +#define H zmm7 +#define T1 zmm8 +#define TMP0 zmm9 +#define TMP1 zmm10 +#define TMP2 zmm11 +#define TMP3 zmm12 +#define TMP4 zmm13 +#define TMP5 zmm14 +#define TMP6 zmm15 + +#define W0 zmm16 +#define W1 zmm17 +#define W2 zmm18 +#define W3 zmm19 +#define W4 zmm20 +#define W5 zmm21 +#define W6 zmm22 +#define W7 zmm23 +#define W8 zmm24 +#define W9 zmm25 +#define W10 zmm26 +#define W11 zmm27 +#define W12 zmm28 +#define W13 zmm29 +#define W14 zmm30 +#define W15 zmm31 + + +#define TRANSPOSE16(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _r10, _r11, _r12, _r13, _r14, _r15, _t0, _t1) \ + \ + \ // input r0 = {a15 a14 a13 a12 a11 a10 a9 a8 a7 a6 a5 a4 a3 a2 a1 a0} + \ // r1 = {b15 b14 b13 b12 b11 b10 b9 b8 b7 b6 b5 b4 b3 b2 b1 b0} + \ // r2 = {c15 c14 c13 c12 c11 c10 c9 c8 c7 c6 c5 c4 c3 c2 c1 c0} + \ // r3 = {d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0} + \ // r4 = {e15 e14 e13 e12 e11 e10 e9 e8 e7 e6 e5 e4 e3 e2 e1 e0} + \ // r5 = {f15 f14 f13 f12 f11 f10 f9 f8 f7 f6 f5 f4 f3 f2 f1 f0} + \ // r6 = {g15 g14 g13 g12 g11 g10 g9 g8 g7 g6 g5 g4 g3 g2 g1 g0} + \ // r7 = {h15 h14 h13 h12 h11 h10 h9 h8 h7 h6 h5 h4 h3 h2 h1 h0} + \ // r8 = {i15 i14 i13 i12 i11 i10 i9 i8 i7 i6 i5 i4 i3 i2 i1 i0} + \ // r9 = {j15 j14 j13 j12 j11 j10 j9 j8 j7 j6 j5 j4 j3 j2 j1 j0} + \ // r10 = {k15 k14 k13 k12 k11 k10 k9 k8 k7 k6 k5 k4 k3 k2 k1 k0} + \ // r11 = {l15 l14 l13 l12 l11 l10 l9 l8 l7 l6 l5 l4 l3 l2 l1 l0} + \ // r12 = {m15 m14 m13 m12 m11 m10 m9 m8 m7 m6 m5 m4 m3 m2 m1 m0} + \ // r13 = 
{n15 n14 n13 n12 n11 n10 n9 n8 n7 n6 n5 n4 n3 n2 n1 n0} + \ // r14 = {o15 o14 o13 o12 o11 o10 o9 o8 o7 o6 o5 o4 o3 o2 o1 o0} + \ // r15 = {p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0} + \ + \ // output r0 = { p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ // r1 = { p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ // r2 = { p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ // r3 = { p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ // r4 = { p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ // r5 = { p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ // r6 = { p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ // r7 = { p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ // r8 = { p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + \ // r9 = { p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + \ + \ // process top half + vshufps _t0, _r0, _r1, 0x44 \ // t0 = {b13 b12 a13 a12 b9 b8 a9 a8 b5 b4 a5 a4 b1 b0 a1 a0} + vshufps _r0, _r0, _r1, 0xEE \ // r0 = {b15 b14 a15 a14 b11 b10 a11 a10 b7 b6 a7 a6 b3 b2 a3 a2} + vshufps _t1, _r2, _r3, 0x44 \ // t1 = {d13 d12 c13 c12 d9 d8 c9 c8 d5 d4 c5 c4 d1 d0 c1 c0} + vshufps _r2, _r2, _r3, 0xEE \ // r2 = {d15 d14 c15 c14 d11 d10 c11 c10 d7 d6 c7 c6 d3 d2 c3 c2} + \ + vshufps _r3, _t0, _t1, 0xDD \ // r3 = {d13 c13 b13 a13 d9 c9 b9 a9 d5 c5 b5 a5 d1 c1 b1 a1} + vshufps _r1, _r0, _r2, 0x88 \ // r1 = {d14 c14 b14 a14 d10 c10 b10 a10 d6 c6 b6 a6 d2 c2 b2 a2} + vshufps _r0, _r0, _r2, 0xDD \ // r0 = {d15 c15 b15 a15 d11 c11 b11 a11 d7 c7 b7 
a7 d3 c3 b3 a3} + vshufps _t0, _t0, _t1, 0x88 \ // t0 = {d12 c12 b12 a12 d8 c8 b8 a8 d4 c4 b4 a4 d0 c0 b0 a0} + \ + \ // use r2 in place of t0 + vshufps _r2, _r4, _r5, 0x44 \ // r2 = {f13 f12 e13 e12 f9 f8 e9 e8 f5 f4 e5 e4 f1 f0 e1 e0} + vshufps _r4, _r4, _r5, 0xEE \ // r4 = {f15 f14 e15 e14 f11 f10 e11 e10 f7 f6 e7 e6 f3 f2 e3 e2} + vshufps _t1, _r6, _r7, 0x44 \ // t1 = {h13 h12 g13 g12 h9 h8 g9 g8 h5 h4 g5 g4 h1 h0 g1 g0} + vshufps _r6, _r6, _r7, 0xEE \ // r6 = {h15 h14 g15 g14 h11 h10 g11 g10 h7 h6 g7 g6 h3 h2 g3 g2} + \ + vshufps _r7, _r2, _t1, 0xDD \ // r7 = {h13 g13 f13 e13 h9 g9 f9 e9 h5 g5 f5 e5 h1 g1 f1 e1} + vshufps _r5, _r4, _r6, 0x88 \ // r5 = {h14 g14 f14 e14 h10 g10 f10 e10 h6 g6 f6 e6 h2 g2 f2 e2} + vshufps _r4, _r4, _r6, 0xDD \ // r4 = {h15 g15 f15 e15 h11 g11 f11 e11 h7 g7 f7 e7 h3 g3 f3 e3} + vshufps _r2, _r2, _t1, 0x88 \ // r2 = {h12 g12 f12 e12 h8 g8 f8 e8 h4 g4 f4 e4 h0 g0 f0 e0} + \ + \ // use r6 in place of t0 + vshufps _r6, _r8, _r9, 0x44 \ // r6 = {j13 j12 i13 i12 j9 j8 i9 i8 j5 j4 i5 i4 j1 j0 i1 i0} + vshufps _r8, _r8, _r9, 0xEE \ // r8 = {j15 j14 i15 i14 j11 j10 i11 i10 j7 j6 i7 i6 j3 j2 i3 i2} + vshufps _t1, _r10, _r11, 0x44 \ // t1 = {l13 l12 k13 k12 l9 l8 k9 k8 l5 l4 k5 k4 l1 l0 k1 k0} + vshufps _r10, _r10, _r11, 0xEE \ // r10 = {l15 l14 k15 k14 l11 l10 k11 k10 l7 l6 k7 k6 l3 l2 k3 k2} + \ + vshufps _r11, _r6, _t1, 0xDD \ // r11 = {l13 k13 j13 113 l9 k9 j9 i9 l5 k5 j5 i5 l1 k1 j1 i1} + vshufps _r9, _r8, _r10, 0x88 \ // r9 = {l14 k14 j14 114 l10 k10 j10 i10 l6 k6 j6 i6 l2 k2 j2 i2} + vshufps _r8, _r8, _r10, 0xDD \ // r8 = {l15 k15 j15 115 l11 k11 j11 i11 l7 k7 j7 i7 l3 k3 j3 i3} + vshufps _r6, _r6, _t1, 0x88 \ // r6 = {l12 k12 j12 112 l8 k8 j8 i8 l4 k4 j4 i4 l0 k0 j0 i0} + \ + \ // use r10 in place of t0 + vshufps _r10, _r12, _r13, 0x44 \ // r10 = {n13 n12 m13 m12 n9 n8 m9 m8 n5 n4 m5 m4 n1 n0 a1 m0} + vshufps _r12, _r12, _r13, 0xEE \ // r12 = {n15 n14 m15 m14 n11 n10 m11 m10 n7 n6 m7 m6 n3 n2 a3 m2} + vshufps _t1, _r14, _r15, 0x44 \ 
// t1 = {p13 p12 013 012 p9 p8 09 08 p5 p4 05 04 p1 p0 01 00} + vshufps _r14, _r14, _r15, 0xEE \ // r14 = {p15 p14 015 014 p11 p10 011 010 p7 p6 07 06 p3 p2 03 02} + \ + vshufps _r15, _r10, _t1, 0xDD \ // r15 = {p13 013 n13 m13 p9 09 n9 m9 p5 05 n5 m5 p1 01 n1 m1} + vshufps _r13, _r12, _r14, 0x88 \ // r13 = {p14 014 n14 m14 p10 010 n10 m10 p6 06 n6 m6 p2 02 n2 m2} + vshufps _r12, _r12, _r14, 0xDD \ // r12 = {p15 015 n15 m15 p11 011 n11 m11 p7 07 n7 m7 p3 03 n3 m3} + vshufps _r10, _r10, _t1, 0x88 \ // r10 = {p12 012 n12 m12 p8 08 n8 m8 p4 04 n4 m4 p0 00 n0 m0} + \ + \ // At this point, the registers that contain interesting data are: + \ // t0, r3, r1, r0, r2, r7, r5, r4, r6, r11, r9, r8, r10, r15, r13, r12 + \ // Can use t1 and r14 as scratch registers + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX \ + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 \ + \ + vmovdqu32 _r14, [rbx] \ + vpermi2q _r14, _t0, _r2 \ // r14 = {h8 g8 f8 e8 d8 c8 b8 a8 h0 g0 f0 e0 d0 c0 b0 a0} + vmovdqu32 _t1, [r8] \ + vpermi2q _t1, _t0, _r2 \ // t1 = {h12 g12 f12 e12 d12 c12 b12 a12 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vmovdqu32 _r2, [rbx] \ + vpermi2q _r2, _r3, _r7 \ // r2 = {h9 g9 f9 e9 d9 c9 b9 a9 h1 g1 f1 e1 d1 c1 b1 a1} + vmovdqu32 _t0, [r8] \ + vpermi2q _t0, _r3, _r7 \ // t0 = {h13 g13 f13 e13 d13 c13 b13 a13 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vmovdqu32 _r3, [rbx] \ + vpermi2q _r3, _r1, _r5 \ // r3 = {h10 g10 f10 e10 d10 c10 b10 a10 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r7, [r8] \ + vpermi2q _r7, _r1, _r5 \ // r7 = {h14 g14 f14 e14 d14 c14 b14 a14 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vmovdqu32 _r1, [rbx] \ + vpermi2q _r1, _r0, _r4 \ // r1 = {h11 g11 f11 e11 d11 c11 b11 a11 h3 g3 f3 e3 d3 c3 b3 a3} + vmovdqu32 _r5, [r8] \ + vpermi2q _r5, _r0, _r4 \ // r5 = {h15 g15 f15 e15 d15 c15 b15 a15 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vmovdqu32 _r0, [rbx] \ + vpermi2q _r0, _r6, _r10 \ // r0 = {p8 o8 n8 m8 l8 k8 j8 i8 p0 o0 n0 m0 l0 k0 j0 i0} + vmovdqu32 _r4, [r8] \ + vpermi2q _r4, _r6, _r10 \ // r4 = {p12 o12 n12 m12 l12 k12 
j12 i12 p4 o4 n4 m4 l4 k4 j4 i4} + \ + vmovdqu32 _r6, [rbx] \ + vpermi2q _r6, _r11, _r15 \ // r6 = {p9 o9 n9 m9 l9 k9 j9 i9 p1 o1 n1 m1 l1 k1 j1 i1} + vmovdqu32 _r10, [r8] \ + vpermi2q _r10, _r11, _r15 \ // r10 = {p13 o13 n13 m13 l13 k13 j13 i13 p5 o5 n5 m5 l5 k5 j5 i5} + \ + vmovdqu32 _r11, [rbx] \ + vpermi2q _r11, _r9, _r13 \ // r11 = {p10 o10 n10 m10 l10 k10 j10 i10 p2 o2 n2 m2 l2 k2 j2 i2} + vmovdqu32 _r15, [r8] \ + vpermi2q _r15, _r9, _r13 \ // r15 = {p14 o14 n14 m14 l14 k14 j14 i14 p6 o6 n6 m6 l6 k6 j6 i6} + \ + vmovdqu32 _r9, [rbx] \ + vpermi2q _r9, _r8, _r12 \ // r9 = {p11 o11 n11 m11 l11 k11 j11 i11 p3 o3 n3 m3 l3 k3 j3 i3} + vmovdqu32 _r13, [r8] \ + vpermi2q _r13, _r8, _r12 \ // r13 = {p15 o15 n15 m15 l15 k15 j15 i15 p7 o7 n7 m7 l7 k7 j7 i7} + \ + \ // At this point r8 and r12 can be used as scratch registers + vshuff64x2 _r8, _r14, _r0, 0xEE \ // r8 = {p8 o8 n8 m8 l8 k8 j8 i8 h8 g8 f8 e8 d8 c8 b8 a8} + vshuff64x2 _r0, _r14, _r0, 0x44 \ // r0 = {p0 o0 n0 m0 l0 k0 j0 i0 h0 g0 f0 e0 d0 c0 b0 a0} + \ + vshuff64x2 _r12, _t1, _r4, 0xEE \ // r12 = {p12 o12 n12 m12 l12 k12 j12 i12 h12 g12 f12 e12 d12 c12 b12 a12} + vshuff64x2 _r4, _t1, _r4, 0x44 \ // r4 = {p4 o4 n4 m4 l4 k4 j4 i4 h4 g4 f4 e4 d4 c4 b4 a4} + \ + vshuff64x2 _r14, _r7, _r15, 0xEE \ // r14 = {p14 o14 n14 m14 l14 k14 j14 i14 h14 g14 f14 e14 d14 c14 b14 a14} + vshuff64x2 _t1, _r7, _r15, 0x44 \ // t1 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + \ + vshuff64x2 _r15, _r5, _r13, 0xEE \ // r15 = {p15 o15 n15 m15 l15 k15 j15 i15 h15 g15 f15 e15 d15 c15 b15 a15} + vshuff64x2 _r7, _r5, _r13, 0x44 \ // r7 = {p7 o7 n7 m7 l7 k7 j7 i7 h7 g7 f7 e7 d7 c7 b7 a7} + \ + vshuff64x2 _r13, _t0, _r10, 0xEE \ // r13 = {p13 o13 n13 m13 l13 k13 j13 i13 h13 g13 f13 e13 d13 c13 b13 a13} + vshuff64x2 _r5, _t0, _r10, 0x44 \ // r5 = {p5 o5 n5 m5 l5 k5 j5 i5 h5 g5 f5 e5 d5 c5 b5 a5} + \ + vshuff64x2 _r10, _r3, _r11, 0xEE \ // r10 = {p10 o10 n10 m10 l10 k10 j10 i10 h10 g10 f10 e10 d10 c10 b10 a10} + vshuff64x2 _t0, _r3, 
_r11, 0x44 \ // t0 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + \ + vshuff64x2 _r11, _r1, _r9, 0xEE \ // r11 = {p11 o11 n11 m11 l11 k11 j11 i11 h11 g11 f11 e11 d11 c11 b11 a11} + vshuff64x2 _r3, _r1, _r9, 0x44 \ // r3 = {p3 o3 n3 m3 l3 k3 j3 i3 h3 g3 f3 e3 d3 c3 b3 a3} + \ + vshuff64x2 _r9, _r2, _r6, 0xEE \ // r9 = {p9 o9 n9 m9 l9 k9 j9 i9 h9 g9 f9 e9 d9 c9 b9 a9} + vshuff64x2 _r1, _r2, _r6, 0x44 \ // r1 = {p1 o1 n1 m1 l1 k1 j1 i1 h1 g1 f1 e1 d1 c1 b1 a1} + \ + vmovdqu32 _r2, _t0 \ // r2 = {p2 o2 n2 m2 l2 k2 j2 i2 h2 g2 f2 e2 d2 c2 b2 a2} + vmovdqu32 _r6, _t1 \ // r6 = {p6 o6 n6 m6 l6 k6 j6 i6 h6 g6 f6 e6 d6 c6 b6 a6} + + +// CH(A, B, C) = (A&B) ^ (~A&C) +// MAJ(E, F, G) = (E&F) ^ (E&G) ^ (F&G) +// SIGMA0 = ROR_2 ^ ROR_13 ^ ROR_22 +// SIGMA1 = ROR_6 ^ ROR_11 ^ ROR_25 +// sigma0 = ROR_7 ^ ROR_18 ^ SHR_3 +// sigma1 = ROR_17 ^ ROR_19 ^ SHR_10 + +// Main processing loop per round +#define PROCESS_LOOP(_WT, _ROUND, _A, _B, _C, _D, _E, _F, _G, _H) \ + \ // T1 = H + SIGMA1(E) + CH(E, F, G) + Kt + Wt + \ // T2 = SIGMA0(A) + MAJ(A, B, C) + \ // H=G, G=F, F=E, E=D+T1, D=C, C=B, B=A, A=T1+T2 + \ + \ // H becomes T2, then add T1 for A + \ // D becomes D + T1 for E + \ + vpaddd T1, _H, TMP3 \ // T1 = H + Kt + vmovdqu32 TMP0, _E \ + vprord TMP1, _E, 6 \ // ROR_6(E) + vprord TMP2, _E, 11 \ // ROR_11(E) + vprord TMP3, _E, 25 \ // ROR_25(E) + vpternlogd TMP0, _F, _G, 0xCA \ // TMP0 = CH(E,F,G) + vpaddd T1, T1, _WT \ // T1 = T1 + Wt + vpternlogd TMP1, TMP2, TMP3, 0x96 \ // TMP1 = SIGMA1(E) + vpaddd T1, T1, TMP0 \ // T1 = T1 + CH(E,F,G) + vpaddd T1, T1, TMP1 \ // T1 = T1 + SIGMA1(E) + vpaddd _D, _D, T1 \ // D = D + T1 + \ + vprord _H, _A, 2 \ // ROR_2(A) + vprord TMP2, _A, 13 \ // ROR_13(A) + vprord TMP3, _A, 22 \ // ROR_22(A) + vmovdqu32 TMP0, _A \ + vpternlogd TMP0, _B, _C, 0xE8 \ // TMP0 = MAJ(A,B,C) + vpternlogd _H, TMP2, TMP3, 0x96 \ // H(T2) = SIGMA0(A) + vpaddd _H, _H, TMP0 \ // H(T2) = SIGMA0(A) + MAJ(A,B,C) + vpaddd _H, _H, T1 \ // H(A) = H(T2) + T1 + \ + vmovdqu32 
TMP3, [TBL + ((_ROUND+1)*64)] \ // Next Kt + + +#define MSG_SCHED_ROUND_16_63(_WT, _WTp1, _WTp9, _WTp14) \ + vprord TMP4, _WTp14, 17 \ // ROR_17(Wt-2) + vprord TMP5, _WTp14, 19 \ // ROR_19(Wt-2) + vpsrld TMP6, _WTp14, 10 \ // SHR_10(Wt-2) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma1(Wt-2) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + vpaddd _WT, _WT, _WTp9 \ // Wt = Wt-16 + sigma1(Wt-2) + Wt-7 + \ + vprord TMP4, _WTp1, 7 \ // ROR_7(Wt-15) + vprord TMP5, _WTp1, 18 \ // ROR_18(Wt-15) + vpsrld TMP6, _WTp1, 3 \ // SHR_3(Wt-15) + vpternlogd TMP4, TMP5, TMP6, 0x96 \ // TMP4 = sigma0(Wt-15) + \ + vpaddd _WT, _WT, TMP4 \ // Wt = Wt-16 + sigma1(Wt-2) + + \ // Wt-7 + sigma0(Wt-15) + + + +// Note this is reading in a block of data for one lane +// When all 16 are read, the data must be transposed to build msg schedule +#define MSG_SCHED_ROUND_00_15(_WT, OFFSET, LABEL) \ + TESTQ $(1<(SB), TBL_P9 + vmovdqu32 TMP2, [TBL] + + // Get first K from table + MOVQ table+16(FP), TBL_P9 + vmovdqu32 TMP3, [TBL] + + // Save digests for later addition + vmovdqu32 [SCRATCH + 64*0], A + vmovdqu32 [SCRATCH + 64*1], B + vmovdqu32 [SCRATCH + 64*2], C + vmovdqu32 [SCRATCH + 64*3], D + vmovdqu32 [SCRATCH + 64*4], E + vmovdqu32 [SCRATCH + 64*5], F + vmovdqu32 [SCRATCH + 64*6], G + vmovdqu32 [SCRATCH + 64*7], H + + add IDX, 64 + + // Transpose input data + TRANSPOSE16(W0, W1, W2, W3, W4, W5, W6, W7, W8, W9, W10, W11, W12, W13, W14, W15, TMP0, TMP1) + + vpshufb W0, W0, TMP2 + vpshufb W1, W1, TMP2 + vpshufb W2, W2, TMP2 + vpshufb W3, W3, TMP2 + vpshufb W4, W4, TMP2 + vpshufb W5, W5, TMP2 + vpshufb W6, W6, TMP2 + vpshufb W7, W7, TMP2 + vpshufb W8, W8, TMP2 + vpshufb W9, W9, TMP2 + vpshufb W10, W10, TMP2 + vpshufb W11, W11, TMP2 + vpshufb W12, W12, TMP2 + vpshufb W13, W13, TMP2 + vpshufb W14, W14, TMP2 + vpshufb W15, W15, TMP2 + + // MSG Schedule for W0-W15 is now complete in registers + // Process first 48 rounds + // Calculate next Wt+16 after processing is complete and Wt is 
unneeded + + PROCESS_LOOP( W0, 0, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 1, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 2, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 3, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 4, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 5, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 6, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 7, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 8, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 9, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 10, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 11, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 12, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 13, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 14, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 15, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + PROCESS_LOOP( W0, 16, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 17, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 18, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 19, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 20, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 21, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( 
W6, 22, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 23, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 24, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 25, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 26, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 27, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 28, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 29, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 30, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 31, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + PROCESS_LOOP( W0, 32, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W0, W1, W9, W14) + PROCESS_LOOP( W1, 33, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W1, W2, W10, W15) + PROCESS_LOOP( W2, 34, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63( W2, W3, W11, W0) + PROCESS_LOOP( W3, 35, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63( W3, W4, W12, W1) + PROCESS_LOOP( W4, 36, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_16_63( W4, W5, W13, W2) + PROCESS_LOOP( W5, 37, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63( W5, W6, W14, W3) + PROCESS_LOOP( W6, 38, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63( W6, W7, W15, W4) + PROCESS_LOOP( W7, 39, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63( W7, W8, W0, W5) + PROCESS_LOOP( W8, 40, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_16_63( W8, W9, W1, W6) + PROCESS_LOOP( W9, 41, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_16_63( W9, W10, W2, W7) + PROCESS_LOOP(W10, 42, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_16_63(W10, W11, W3, W8) + PROCESS_LOOP(W11, 43, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_16_63(W11, W12, W4, W9) + PROCESS_LOOP(W12, 44, E, F, G, H, 
A, B, C, D) + MSG_SCHED_ROUND_16_63(W12, W13, W5, W10) + PROCESS_LOOP(W13, 45, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_16_63(W13, W14, W6, W11) + PROCESS_LOOP(W14, 46, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_16_63(W14, W15, W7, W12) + PROCESS_LOOP(W15, 47, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_16_63(W15, W0, W8, W13) + + // Check if this is the last block + sub INP_SIZE, 1 + JE lastLoop + + // Load next mask for inputs + ADDQ $8, MASKP_P9 + MOVQ (MASKP_P9), MASK_P9 + + // Process last 16 rounds + // Read in next block msg data for use in first 16 words of msg sched + + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W0, 0, skipNext0) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_00_15( W1, 1, skipNext1) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15( W2, 2, skipNext2) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15( W3, 3, skipNext3) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15( W4, 4, skipNext4) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15( W5, 5, skipNext5) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15( W6, 6, skipNext6) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + MSG_SCHED_ROUND_00_15( W7, 7, skipNext7) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + MSG_SCHED_ROUND_00_15( W8, 8, skipNext8) + PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) + MSG_SCHED_ROUND_00_15( W9, 9, skipNext9) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + MSG_SCHED_ROUND_00_15(W10, 10, skipNext10) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + MSG_SCHED_ROUND_00_15(W11, 11, skipNext11) + PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + MSG_SCHED_ROUND_00_15(W12, 12, skipNext12) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + MSG_SCHED_ROUND_00_15(W13, 13, skipNext13) + PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + MSG_SCHED_ROUND_00_15(W14, 14, skipNext14) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, 
H, A) + MSG_SCHED_ROUND_00_15(W15, 15, skipNext15) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 + vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + kmovq k1, mask + JMP lloop + +lastLoop: + // Process last 16 rounds + PROCESS_LOOP( W0, 48, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W1, 49, H, A, B, C, D, E, F, G) + PROCESS_LOOP( W2, 50, G, H, A, B, C, D, E, F) + PROCESS_LOOP( W3, 51, F, G, H, A, B, C, D, E) + PROCESS_LOOP( W4, 52, E, F, G, H, A, B, C, D) + PROCESS_LOOP( W5, 53, D, E, F, G, H, A, B, C) + PROCESS_LOOP( W6, 54, C, D, E, F, G, H, A, B) + PROCESS_LOOP( W7, 55, B, C, D, E, F, G, H, A) + PROCESS_LOOP( W8, 56, A, B, C, D, E, F, G, H) + PROCESS_LOOP( W9, 57, H, A, B, C, D, E, F, G) + PROCESS_LOOP(W10, 58, G, H, A, B, C, D, E, F) + PROCESS_LOOP(W11, 59, F, G, H, A, B, C, D, E) + PROCESS_LOOP(W12, 60, E, F, G, H, A, B, C, D) + PROCESS_LOOP(W13, 61, D, E, F, G, H, A, B, C) + PROCESS_LOOP(W14, 62, C, D, E, F, G, H, A, B) + PROCESS_LOOP(W15, 63, B, C, D, E, F, G, H, A) + + // Add old digest + vmovdqu32 TMP2, A + vmovdqu32 A, [SCRATCH + 64*0] + vpaddd A{k1}, A, TMP2 + vmovdqu32 TMP2, B + vmovdqu32 B, [SCRATCH + 64*1] + vpaddd B{k1}, B, TMP2 + vmovdqu32 TMP2, C + vmovdqu32 C, [SCRATCH + 64*2] + vpaddd C{k1}, C, TMP2 + vmovdqu32 TMP2, D + vmovdqu32 D, [SCRATCH + 64*3] + vpaddd D{k1}, D, TMP2 + vmovdqu32 TMP2, E + vmovdqu32 E, [SCRATCH + 64*4] + vpaddd E{k1}, E, TMP2 + vmovdqu32 TMP2, F + vmovdqu32 F, [SCRATCH + 64*5] + vpaddd F{k1}, F, TMP2 
+ vmovdqu32 TMP2, G + vmovdqu32 G, [SCRATCH + 64*6] + vpaddd G{k1}, G, TMP2 + vmovdqu32 TMP2, H + vmovdqu32 H, [SCRATCH + 64*7] + vpaddd H{k1}, H, TMP2 + + // Write out digest + vmovdqu32 [STATE + 0*SHA256_DIGEST_ROW_SIZE], A + vmovdqu32 [STATE + 1*SHA256_DIGEST_ROW_SIZE], B + vmovdqu32 [STATE + 2*SHA256_DIGEST_ROW_SIZE], C + vmovdqu32 [STATE + 3*SHA256_DIGEST_ROW_SIZE], D + vmovdqu32 [STATE + 4*SHA256_DIGEST_ROW_SIZE], E + vmovdqu32 [STATE + 5*SHA256_DIGEST_ROW_SIZE], F + vmovdqu32 [STATE + 6*SHA256_DIGEST_ROW_SIZE], G + vmovdqu32 [STATE + 7*SHA256_DIGEST_ROW_SIZE], H + + VZEROUPPER + RET + +// +// Tables +// + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 + +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA 
PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go new file mode 100644 index 000000000..e6bd455df --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.go @@ -0,0 +1,500 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +import ( + "encoding/binary" + "errors" + "hash" + "sort" + "sync/atomic" + "time" +) + +//go:noescape +func sha256X16Avx512(digests *[512]byte, scratch *[512]byte, table *[512]uint64, mask []uint64, inputs [16][]byte) + +// Avx512ServerUID - Do not start at 0 but next multiple of 16 so as to be able to +// differentiate with default initialiation value of 0 +const Avx512ServerUID = 16 + +var uidCounter uint64 + +// NewAvx512 - initialize sha256 Avx512 implementation. 
+func NewAvx512(a512srv *Avx512Server) hash.Hash { + uid := atomic.AddUint64(&uidCounter, 1) + return &Avx512Digest{uid: uid, a512srv: a512srv} +} + +// Avx512Digest - Type for computing SHA256 using Avx512 +type Avx512Digest struct { + uid uint64 + a512srv *Avx512Server + x [chunk]byte + nx int + len uint64 + final bool + result [Size]byte +} + +// Size - Return size of checksum +func (d *Avx512Digest) Size() int { return Size } + +// BlockSize - Return blocksize of checksum +func (d Avx512Digest) BlockSize() int { return BlockSize } + +// Reset - reset sha digest to its initial values +func (d *Avx512Digest) Reset() { + d.a512srv.blocksCh <- blockInput{uid: d.uid, reset: true} + d.nx = 0 + d.len = 0 + d.final = false +} + +// Write to digest +func (d *Avx512Digest) Write(p []byte) (nn int, err error) { + + if d.final { + return 0, errors.New("Avx512Digest already finalized. Reset first before writing again") + } + + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: d.x[:]} + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: p[:n]} + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Sum - Return sha256 sum in bytes +func (d *Avx512Digest) Sum(in []byte) (result []byte) { + + if d.final { + return append(in, d.result[:]...) + } + + trail := make([]byte, 0, 128) + + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + trail = append(d.x[:d.nx], tmp[0:56-len%64]...) + } else { + trail = append(d.x[:d.nx], tmp[0:64+56-len%64]...) + } + d.nx = 0 + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + trail = append(trail, tmp[0:8]...) 
+ + sumCh := make(chan [Size]byte) + d.a512srv.blocksCh <- blockInput{uid: d.uid, msg: trail, final: true, sumCh: sumCh} + d.result = <-sumCh + d.final = true + return append(in, d.result[:]...) +} + +var table = [512]uint64{ + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, 0x428a2f98428a2f98, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, 0x7137449171374491, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, 0xb5c0fbcfb5c0fbcf, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, 0xe9b5dba5e9b5dba5, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, 0x3956c25b3956c25b, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, 0x59f111f159f111f1, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, 0x923f82a4923f82a4, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, + 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, 0xab1c5ed5ab1c5ed5, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, 0xd807aa98d807aa98, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, 0x12835b0112835b01, + 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, 0x243185be243185be, + 0x243185be243185be, 0x243185be243185be, 
0x243185be243185be, 0x243185be243185be, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, 0x550c7dc3550c7dc3, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, 0x72be5d7472be5d74, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, 0x80deb1fe80deb1fe, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, 0x9bdc06a79bdc06a7, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, 0xc19bf174c19bf174, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, 0xe49b69c1e49b69c1, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, 0xefbe4786efbe4786, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, 0x0fc19dc60fc19dc6, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, 0x240ca1cc240ca1cc, + 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, 0x2de92c6f2de92c6f, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, + 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, 0x4a7484aa4a7484aa, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, + 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 0x5cb0a9dc5cb0a9dc, 
0x5cb0a9dc5cb0a9dc, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, 0x76f988da76f988da, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, 0x983e5152983e5152, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, 0xa831c66da831c66d, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, 0xb00327c8b00327c8, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, 0xbf597fc7bf597fc7, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, 0xc6e00bf3c6e00bf3, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, 0xd5a79147d5a79147, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, 0x06ca635106ca6351, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, 0x1429296714292967, + 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, 0x27b70a8527b70a85, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, 0x2e1b21382e1b2138, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, 0x4d2c6dfc4d2c6dfc, + 
0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, 0x53380d1353380d13, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, 0x650a7354650a7354, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, 0x766a0abb766a0abb, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, 0x81c2c92e81c2c92e, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, 0x92722c8592722c85, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, 0xa2bfe8a1a2bfe8a1, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, 0xa81a664ba81a664b, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, 0xc24b8b70c24b8b70, + 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, 0xc76c51a3c76c51a3, + 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, + 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, 0xd192e819d192e819, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, 0xd6990624d6990624, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, 0xf40e3585f40e3585, + 0x106aa070106aa070, 
0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, 0x106aa070106aa070, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, 0x19a4c11619a4c116, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, 0x1e376c081e376c08, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, 0x2748774c2748774c, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, 0x34b0bcb534b0bcb5, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, 0x391c0cb3391c0cb3, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, 0x4ed8aa4a4ed8aa4a, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, 0x5b9cca4f5b9cca4f, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, 0x682e6ff3682e6ff3, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, + 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, 0x748f82ee748f82ee, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, 0x78a5636f78a5636f, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, 0x84c8781484c87814, + 0x8cc702088cc70208, 0x8cc702088cc70208, 
0x8cc702088cc70208, 0x8cc702088cc70208, + 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, 0x8cc702088cc70208, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, 0x90befffa90befffa, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, 0xa4506ceba4506ceb, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, 0xbef9a3f7bef9a3f7, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, + 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2, 0xc67178f2c67178f2} + +// Interface function to assembly ode +func blockAvx512(digests *[512]byte, input [16][]byte, mask []uint64) [16][Size]byte { + + scratch := [512]byte{} + sha256X16Avx512(digests, &scratch, &table, mask, input) + + output := [16][Size]byte{} + for i := 0; i < 16; i++ { + output[i] = getDigest(i, digests[:]) + } + + return output +} + +func getDigest(index int, state []byte) (sum [Size]byte) { + for j := 0; j < 16; j += 2 { + for i := index*4 + j*Size; i < index*4+(j+1)*Size; i += Size { + binary.BigEndian.PutUint32(sum[j*2:], binary.LittleEndian.Uint32(state[i:i+4])) + } + } + return +} + +// Message to send across input channel +type blockInput struct { + uid uint64 + msg []byte + reset bool + final bool + sumCh chan [Size]byte +} + +// Avx512Server - Type to implement 16x parallel handling of SHA256 invocations +type Avx512Server struct { + blocksCh chan blockInput // Input channel + totalIn int // Total number of inputs waiting to be processed + lanes [16]Avx512LaneInfo // Array with info per lane (out of 16) + digests map[uint64][Size]byte // Map of uids to (interim) digest results +} + +// Avx512LaneInfo - Info for each lane +type Avx512LaneInfo struct { + uid uint64 // unique identification 
for this SHA processing + block []byte // input block to be processed + outputCh chan [Size]byte // channel for output result +} + +// NewAvx512Server - Create new object for parallel processing handling +func NewAvx512Server() *Avx512Server { + a512srv := &Avx512Server{} + a512srv.digests = make(map[uint64][Size]byte) + a512srv.blocksCh = make(chan blockInput) + + // Start a single thread for reading from the input channel + go a512srv.Process() + return a512srv +} + +// Process - Sole handler for reading from the input channel +func (a512srv *Avx512Server) Process() { + for { + select { + case block := <-a512srv.blocksCh: + if block.reset { + a512srv.reset(block.uid) + continue + } + index := block.uid & 0xf + // fmt.Println("Adding message:", block.uid, index) + + if a512srv.lanes[index].block != nil { // If slot is already filled, process all inputs + //fmt.Println("Invoking Blocks()") + a512srv.blocks() + } + a512srv.totalIn++ + a512srv.lanes[index] = Avx512LaneInfo{uid: block.uid, block: block.msg} + if block.final { + a512srv.lanes[index].outputCh = block.sumCh + } + if a512srv.totalIn == len(a512srv.lanes) { + // fmt.Println("Invoking Blocks() while FULL: ") + a512srv.blocks() + } + + // TODO: test with larger timeout + case <-time.After(1 * time.Microsecond): + for _, lane := range a512srv.lanes { + if lane.block != nil { // check if there is any input to process + // fmt.Println("Invoking Blocks() on TIMEOUT: ") + a512srv.blocks() + break // we are done + } + } + } + } +} + +// Do a reset for this calculation +func (a512srv *Avx512Server) reset(uid uint64) { + + // Check if there is a message still waiting to be processed (and remove if so) + for i, lane := range a512srv.lanes { + if lane.uid == uid { + if lane.block != nil { + a512srv.lanes[i] = Avx512LaneInfo{} // clear message + a512srv.totalIn-- + } + } + } + + // Delete entry from hash map + delete(a512srv.digests, uid) +} + +// Invoke assembly and send results back +func (a512srv *Avx512Server) 
blocks() (err error) { + + inputs := [16][]byte{} + for i := range inputs { + inputs[i] = a512srv.lanes[i].block + } + + mask := expandMask(genMask(inputs)) + outputs := blockAvx512(a512srv.getDigests(), inputs, mask) + + a512srv.totalIn = 0 + for i := 0; i < len(outputs); i++ { + uid, outputCh := a512srv.lanes[i].uid, a512srv.lanes[i].outputCh + a512srv.digests[uid] = outputs[i] + a512srv.lanes[i] = Avx512LaneInfo{} + + if outputCh != nil { + // Send back result + outputCh <- outputs[i] + delete(a512srv.digests, uid) // Delete entry from hashmap + } + } + return +} + +func (a512srv *Avx512Server) Write(uid uint64, p []byte) (nn int, err error) { + a512srv.blocksCh <- blockInput{uid: uid, msg: p} + return len(p), nil +} + +// Sum - return sha256 sum in bytes for a given sum id. +func (a512srv *Avx512Server) Sum(uid uint64, p []byte) [32]byte { + sumCh := make(chan [32]byte) + a512srv.blocksCh <- blockInput{uid: uid, msg: p, final: true, sumCh: sumCh} + return <-sumCh +} + +func (a512srv *Avx512Server) getDigests() *[512]byte { + digests := [512]byte{} + for i, lane := range a512srv.lanes { + a, ok := a512srv.digests[lane.uid] + if ok { + binary.BigEndian.PutUint32(digests[(i+0*16)*4:], binary.LittleEndian.Uint32(a[0:4])) + binary.BigEndian.PutUint32(digests[(i+1*16)*4:], binary.LittleEndian.Uint32(a[4:8])) + binary.BigEndian.PutUint32(digests[(i+2*16)*4:], binary.LittleEndian.Uint32(a[8:12])) + binary.BigEndian.PutUint32(digests[(i+3*16)*4:], binary.LittleEndian.Uint32(a[12:16])) + binary.BigEndian.PutUint32(digests[(i+4*16)*4:], binary.LittleEndian.Uint32(a[16:20])) + binary.BigEndian.PutUint32(digests[(i+5*16)*4:], binary.LittleEndian.Uint32(a[20:24])) + binary.BigEndian.PutUint32(digests[(i+6*16)*4:], binary.LittleEndian.Uint32(a[24:28])) + binary.BigEndian.PutUint32(digests[(i+7*16)*4:], binary.LittleEndian.Uint32(a[28:32])) + } else { + binary.LittleEndian.PutUint32(digests[(i+0*16)*4:], init0) + binary.LittleEndian.PutUint32(digests[(i+1*16)*4:], init1) + 
binary.LittleEndian.PutUint32(digests[(i+2*16)*4:], init2) + binary.LittleEndian.PutUint32(digests[(i+3*16)*4:], init3) + binary.LittleEndian.PutUint32(digests[(i+4*16)*4:], init4) + binary.LittleEndian.PutUint32(digests[(i+5*16)*4:], init5) + binary.LittleEndian.PutUint32(digests[(i+6*16)*4:], init6) + binary.LittleEndian.PutUint32(digests[(i+7*16)*4:], init7) + } + } + return &digests +} + +// Helper struct for sorting blocks based on length +type lane struct { + len uint + pos uint +} + +type lanes []lane + +func (lns lanes) Len() int { return len(lns) } +func (lns lanes) Swap(i, j int) { lns[i], lns[j] = lns[j], lns[i] } +func (lns lanes) Less(i, j int) bool { return lns[i].len < lns[j].len } + +// Helper struct for +type maskRounds struct { + mask uint64 + rounds uint64 +} + +func genMask(input [16][]byte) [16]maskRounds { + + // Sort on blocks length small to large + var sorted [16]lane + for c, inpt := range input { + sorted[c] = lane{uint(len(inpt)), uint(c)} + } + sort.Sort(lanes(sorted[:])) + + // Create mask array including 'rounds' between masks + m, round, index := uint64(0xffff), uint64(0), 0 + var mr [16]maskRounds + for _, s := range sorted { + if s.len > 0 { + if uint64(s.len)>>6 > round { + mr[index] = maskRounds{m, (uint64(s.len) >> 6) - round} + index++ + } + round = uint64(s.len) >> 6 + } + m = m & ^(1 << uint(s.pos)) + } + + return mr +} + +// TODO: remove function +func expandMask(mr [16]maskRounds) []uint64 { + size := uint64(0) + for _, r := range mr { + size += r.rounds + } + result, index := make([]uint64, size), 0 + for _, r := range mr { + for j := uint64(0); j < r.rounds; j++ { + result[index] = r.mask + index++ + } + } + return result +} diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s new file mode 100644 index 000000000..275bcacbc --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx512_amd64.s @@ -0,0 +1,267 @@ +//+build 
!noasm,!appengine + +TEXT ·sha256X16Avx512(SB), 7, $0 + MOVQ digests+0(FP), DI + MOVQ scratch+8(FP), R12 + MOVQ mask_len+32(FP), SI + MOVQ mask_base+24(FP), R13 + MOVQ (R13), R14 + LONG $0x92fbc1c4; BYTE $0xce + LEAQ inputs+48(FP), AX + QUAD $0xf162076f487ef162; QUAD $0x7ef162014f6f487e; QUAD $0x487ef16202576f48; QUAD $0x6f487ef162035f6f; QUAD $0x6f6f487ef1620467; QUAD $0x06776f487ef16205; LONG $0x487ef162; WORD $0x7f6f; BYTE $0x07 + MOVQ table+16(FP), DX + WORD $0x3148; BYTE $0xc9 + TESTQ $(1<<0), R14 + JE skipInput0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipInput0: + TESTQ $(1<<1), R14 + JE skipInput1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipInput1: + TESTQ $(1<<2), R14 + JE skipInput2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipInput2: + TESTQ $(1<<3), R14 + JE skipInput3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipInput3: + TESTQ $(1<<4), R14 + JE skipInput4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipInput4: + TESTQ $(1<<5), R14 + JE skipInput5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipInput5: + TESTQ $(1<<6), R14 + JE skipInput6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipInput6: + TESTQ $(1<<7), R14 + JE skipInput7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipInput7: + TESTQ $(1<<8), R14 + JE skipInput8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipInput8: + TESTQ $(1<<9), R14 + JE skipInput9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipInput9: + TESTQ $(1<<10), R14 + JE skipInput10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipInput10: + TESTQ $(1<<11), R14 + JE skipInput11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipInput11: + TESTQ $(1<<12), R14 + JE skipInput12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE 
$0x09 + +skipInput12: + TESTQ $(1<<13), R14 + JE skipInput13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipInput13: + TESTQ $(1<<14), R14 + JE skipInput14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipInput14: + TESTQ $(1<<15), R14 + JE skipInput15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipInput15: +lloop: + LEAQ PSHUFFLE_BYTE_FLIP_MASK<>(SB), DX + LONG $0x487e7162; WORD $0x1a6f + MOVQ table+16(FP), DX + QUAD $0xd162226f487e7162; QUAD $0x7ed16224047f487e; QUAD $0x7ed16201244c7f48; QUAD $0x7ed1620224547f48; QUAD $0x7ed16203245c7f48; QUAD $0x7ed1620424647f48; QUAD $0x7ed16205246c7f48; QUAD $0x7ed1620624747f48; QUAD $0xc1834807247c7f48; QUAD $0x44c9c6407c316240; QUAD $0x62eec1c6407ca162; QUAD $0xa16244d3c6406c31; QUAD $0x34c162eed3c6406c; QUAD $0x407ca162dddac648; QUAD $0xc6407ca16288cac6; QUAD $0xcac648345162ddc2; QUAD $0x44d5c6405ca16288; QUAD $0x62eee5c6405ca162; QUAD $0xa16244d7c6404c31; QUAD $0x6cc162eef7c6404c; QUAD $0x405ca162ddfac640; QUAD $0xc6405ca16288eec6; QUAD $0xd2c6406cc162dde6; QUAD $0x44f1c6403c816288; QUAD $0x62eec1c6403c0162; QUAD $0x016244d3c6402c11; QUAD $0x4c4162eed3c6402c; QUAD $0x403c0162dddac640; QUAD $0xc6403c016288cac6; QUAD $0xf2c6404cc162ddc2; QUAD $0x44d5c6401c016288; QUAD $0x62eee5c6401c0162; QUAD $0x016244d7c6400c11; QUAD $0x2c4162eef7c6400c; QUAD $0x401c0162ddfac640; QUAD $0xc6401c016288eec6; QUAD $0xd2c6402c4162dde6; BYTE $0x88 + LEAQ PSHUFFLE_TRANSPOSE16_MASK1<>(SB), BX + LEAQ PSHUFFLE_TRANSPOSE16_MASK2<>(SB), R8 + QUAD $0x2262336f487e6162; QUAD $0x487e5162f27648b5; QUAD $0xd27648b53262106f; QUAD $0xa262136f487ee162; QUAD $0x487e5162d77640e5; QUAD $0xcf7640e53262086f; QUAD $0xa2621b6f487ee162; QUAD $0x487ec162dd7640f5; QUAD $0xfd7640f5a262386f; QUAD $0xa2620b6f487ee162; QUAD $0x487ec162cc7640fd; QUAD $0xec7640fda262286f; QUAD $0x8262036f487ee162; QUAD $0x487ec162c27640cd; QUAD $0xe27640cd8262206f; QUAD $0x8262336f487ee162; QUAD $0x487e4162f77640a5; 
QUAD $0xd77640a50262106f; QUAD $0x02621b6f487e6162; QUAD $0x487e4162dd7640b5; QUAD $0xfd7640b50262386f; QUAD $0x02620b6f487e6162; QUAD $0x487e4162cc7640bd; QUAD $0xec7640bd0262286f; QUAD $0x62eec023408d2362; QUAD $0x236244c023408da3; QUAD $0xada362eee42348ad; QUAD $0x40c5036244e42348; QUAD $0x2340c51362eef723; QUAD $0xfd2340d5036244d7; QUAD $0x44fd2340d58362ee; QUAD $0x62eeea2348b50362; QUAD $0x036244ea2348b583; QUAD $0xe51362eed32340e5; QUAD $0x40f5036244cb2340; QUAD $0x2340f58362eed923; QUAD $0xce2340ed236244d9; QUAD $0x44ce2340eda362ee; QUAD $0xc162d16f487ec162; QUAD $0x407dc262f26f487e; QUAD $0xcb004075c262c300; QUAD $0xc262d300406dc262; QUAD $0x405dc262db004065; QUAD $0xeb004055c262e300; QUAD $0xc262f300404dc262; QUAD $0x403d4262fb004045; QUAD $0xcb0040354262c300; QUAD $0x4262d300402d4262; QUAD $0x401d4262db004025; QUAD $0xeb0040154262e300; QUAD $0x4262f300400d4262; QUAD $0x48455162fb004005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6201626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD 
$0x4815916202626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16203; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16204626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; 
QUAD $0x724815b16205626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x06626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16207626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD 
$0x6f487e7162c0fe48; QUAD $0xc5724815b1620862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6209626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1620a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; 
QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591620b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91620c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591620d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD 
$0xfe486dd162d1fe48; QUAD $0x0e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591620f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591621062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; 
QUAD $0xf8fe4845d162f9fe; QUAD $0x6211626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916212626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16213; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD 
$0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16214626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16215626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x16626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; 
QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16217626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1621862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6219626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD 
$0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1621a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591621b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91621c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; 
QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591621d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x1e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591621f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD 
$0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591622062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x48455162fdfe4005; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d3162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6221626f487e7162; QUAD $0x916211c672481591; QUAD $0x05916213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe407dc16296ef25; QUAD $0x62c1fe407d8162c5; QUAD $0xb16207c1724815b1; QUAD $0x05b16212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe407dc16296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815916222626f48; QUAD $0x72480d916211c772; QUAD $0xd7724805916213c7; QUAD $0x96ef25480d53620a; QUAD $0x8162cdfe4075c162; QUAD $0x4815b162cafe4075; QUAD $0x72480db16207c272; QUAD $0xd2724805b16212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe4075c162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; 
QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c0724815b16223; QUAD $0x6213c072480db162; QUAD $0x53620ad0724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe406d8162d5fe40; QUAD $0x07c3724815b162d3; QUAD $0x6212c372480db162; QUAD $0x536203d3724805b1; QUAD $0x6dc16296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d3162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD $0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0xb16224626f487e71; QUAD $0x0db16211c1724815; QUAD $0x4805b16213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4065c16296ef; QUAD $0xb162dcfe40658162; QUAD $0x0db16207c4724815; QUAD $0x4805b16212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4065c16296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x724815b16225626f; QUAD $0xc272480db16211c2; QUAD $0x0ad2724805b16213; QUAD $0x6296ef25480d5362; QUAD $0x5d8162e5fe405dc1; QUAD $0x724815b162e5fe40; QUAD $0xc572480db16207c5; QUAD $0x03d5724805b16212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe405dc1; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d3162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD 
$0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x26626f487e7162d0; QUAD $0x6211c3724815b162; QUAD $0xb16213c372480db1; QUAD $0x0d53620ad3724805; QUAD $0x4055c16296ef2548; QUAD $0xeefe40558162edfe; QUAD $0x6207c6724815b162; QUAD $0xb16212c672480db1; QUAD $0x0d536203d6724805; QUAD $0x4055c16296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x15b16227626f487e; QUAD $0x480db16211c47248; QUAD $0x724805b16213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe404dc16296; QUAD $0x15b162f7fe404d81; QUAD $0x480db16207c77248; QUAD $0x724805b16212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe404dc16296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc5724815b1622862; QUAD $0x13c572480db16211; QUAD $0x620ad5724805b162; QUAD $0xc16296ef25480d53; QUAD $0x4045a162fdfe4045; QUAD $0xc07248159162f8fe; QUAD $0x12c072480d916207; QUAD $0x6203d07248059162; QUAD $0xc16296ef25480d53; QUAD $0x48455162fdfe4045; QUAD $0xcc6f487e7162c4fe; QUAD $0x6206c472482df162; QUAD $0xf1620bc4724825f1; QUAD $0x55736219c472481d; QUAD $0x483d1162cace2548; QUAD $0xd42548255362c0fe; QUAD $0x62c1fe483d516296; QUAD $0x65d162c2fe483d51; QUAD $0x724845f162d8fe48; QUAD $0xc0724825f16202c0; QUAD $0x16c072481df1620d; 
QUAD $0x7362c86f487e7162; QUAD $0x25d362e8ca254875; QUAD $0x4845d16296fc2548; QUAD $0xf8fe4845d162f9fe; QUAD $0x6229626f487e7162; QUAD $0xb16211c6724815b1; QUAD $0x05b16213c672480d; QUAD $0x480d53620ad67248; QUAD $0xfe403d416296ef25; QUAD $0x62c1fe403d2162c5; QUAD $0x916207c172481591; QUAD $0x05916212c172480d; QUAD $0x480d536203d17248; QUAD $0xfe403d416296ef25; QUAD $0x62c4fe484d5162c5; QUAD $0x2df162cb6f487e71; QUAD $0x4825f16206c37248; QUAD $0x72481df1620bc372; QUAD $0xcd25485d736219c3; QUAD $0x62c1fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xd0fe486dd162c2fe; QUAD $0x6202c772484df162; QUAD $0xf1620dc7724825f1; QUAD $0x7e716216c772481d; QUAD $0x25487d7362cf6f48; QUAD $0xf4254825d362e8c9; QUAD $0x62f1fe484dd16296; QUAD $0x7e7162f0fe484dd1; QUAD $0x4815b1622a626f48; QUAD $0x72480db16211c772; QUAD $0xd7724805b16213c7; QUAD $0x96ef25480d53620a; QUAD $0x2162cdfe40354162; QUAD $0x48159162cafe4035; QUAD $0x72480d916207c272; QUAD $0xd2724805916212c2; QUAD $0x96ef25480d536203; QUAD $0x5162cdfe40354162; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x11c072481591622b; QUAD $0x6213c072480d9162; QUAD $0x53620ad072480591; QUAD $0x2d416296ef25480d; QUAD $0xfe402d2162d5fe40; QUAD $0x07c37248159162d3; QUAD $0x6212c372480d9162; QUAD $0x536203d372480591; QUAD $0x2d416296ef25480d; QUAD $0xfe485d5162d5fe40; QUAD $0x62c96f487e7162c4; QUAD $0xf16206c172482df1; QUAD $0x1df1620bc1724825; QUAD $0x486d736219c17248; QUAD $0xfe483d1162cacb25; QUAD $0x96d42548255362c3; QUAD $0x5162c1fe483d5162; QUAD $0x487dd162c2fe483d; QUAD $0xc572485df162c0fe; QUAD $0x0dc5724825f16202; QUAD 
$0x6216c572481df162; QUAD $0x4d7362cd6f487e71; QUAD $0x4825d362e8cf2548; QUAD $0xfe485dd16296e425; QUAD $0x62e0fe485dd162e1; QUAD $0x91622c626f487e71; QUAD $0x0d916211c1724815; QUAD $0x4805916213c17248; QUAD $0x25480d53620ad172; QUAD $0xddfe4025416296ef; QUAD $0x9162dcfe40252162; QUAD $0x0d916207c4724815; QUAD $0x4805916212c47248; QUAD $0x25480d536203d472; QUAD $0xddfe4025416296ef; QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; QUAD $0x72481591622d626f; QUAD $0xc272480d916211c2; QUAD $0x0ad2724805916213; QUAD $0x6296ef25480d5362; QUAD $0x1d2162e5fe401d41; QUAD $0x7248159162e5fe40; QUAD $0xc572480d916207c5; QUAD $0x03d5724805916212; QUAD $0x6296ef25480d5362; QUAD $0x6d5162e5fe401d41; QUAD $0x6f487e7162c4fe48; QUAD $0x06c772482df162cf; QUAD $0x620bc7724825f162; QUAD $0x736219c772481df1; QUAD $0x3d1162cac925487d; QUAD $0x2548255362c5fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x486df162f0fe484d; QUAD $0x724825f16202c372; QUAD $0xc372481df1620dc3; QUAD $0x62cb6f487e716216; QUAD $0xd362e8cd25485d73; QUAD $0x6dd16296d4254825; QUAD $0xfe486dd162d1fe48; QUAD $0x2e626f487e7162d0; QUAD $0x6211c37248159162; QUAD $0x916213c372480d91; QUAD $0x0d53620ad3724805; QUAD $0x4015416296ef2548; QUAD $0xeefe40152162edfe; QUAD $0x6207c67248159162; QUAD $0x916212c672480d91; QUAD $0x0d536203d6724805; QUAD $0x4015416296ef2548; QUAD $0xc4fe48755162edfe; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; 
QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x1591622f626f487e; QUAD $0x480d916211c47248; QUAD $0x724805916213c472; QUAD $0xef25480d53620ad4; QUAD $0x62f5fe400d416296; QUAD $0x159162f7fe400d21; QUAD $0x480d916207c77248; QUAD $0x724805916212c772; QUAD $0xef25480d536203d7; QUAD $0x62f5fe400d416296; QUAD $0x7e7162c4fe487d51; QUAD $0x72482df162cd6f48; QUAD $0xc5724825f16206c5; QUAD $0x19c572481df1620b; QUAD $0x62cacf25484d7362; QUAD $0x255362c7fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162e0fe485dd162; QUAD $0x25f16202c172487d; QUAD $0x481df1620dc17248; QUAD $0x6f487e716216c172; QUAD $0xe8cb25486d7362c9; QUAD $0x6296c4254825d362; QUAD $0x7dd162c1fe487dd1; QUAD $0x6f487e7162c0fe48; QUAD $0xc572481591623062; QUAD $0x13c572480d916211; QUAD $0x620ad57248059162; QUAD $0x416296ef25480d53; QUAD $0x40050162fdfe4005; QUAD $0xc0724815b162f8fe; QUAD $0x12c072480db16207; QUAD $0x6203d0724805b162; QUAD $0x416296ef25480d53; QUAD $0x01ee8348fdfe4005 + JE lastLoop + ADDQ $8, R13 + MOVQ (R13), R14 + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; WORD $0x626f; BYTE $0x31 + TESTQ $(1<<0), R14 + JE skipNext0 + MOVQ 0*24(AX), R9 + LONG $0x487cc162; WORD $0x0410; BYTE $0x09 + +skipNext0: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD 
$0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x32 + TESTQ $(1<<1), R14 + JE skipNext1 + MOVQ 1*24(AX), R9 + LONG $0x487cc162; WORD $0x0c10; BYTE $0x09 + +skipNext1: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x33 + TESTQ $(1<<2), R14 + JE skipNext2 + MOVQ 2*24(AX), R9 + LONG $0x487cc162; WORD $0x1410; BYTE $0x09 + +skipNext2: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x34 + TESTQ $(1<<3), R14 + JE skipNext3 + MOVQ 3*24(AX), R9 + LONG $0x487cc162; WORD $0x1c10; BYTE $0x09 + +skipNext3: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD $0x5362c4fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x35 + TESTQ $(1<<4), R14 + JE skipNext4 + MOVQ 4*24(AX), R9 + LONG $0x487cc162; WORD $0x2410; BYTE $0x09 + +skipNext4: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD 
$0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x36 + TESTQ $(1<<5), R14 + JE skipNext5 + MOVQ 5*24(AX), R9 + LONG $0x487cc162; WORD $0x2c10; BYTE $0x09 + +skipNext5: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x37 + TESTQ $(1<<6), R14 + JE skipNext6 + MOVQ 6*24(AX), R9 + LONG $0x487cc162; WORD $0x3410; BYTE $0x09 + +skipNext6: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x38 + TESTQ $(1<<7), R14 + JE skipNext7 + MOVQ 7*24(AX), R9 + LONG $0x487cc162; WORD $0x3c10; BYTE $0x09 + +skipNext7: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD 
$0x487e7162f8fe4845; WORD $0x626f; BYTE $0x39 + TESTQ $(1<<8), R14 + JE skipNext8 + MOVQ 8*24(AX), R9 + LONG $0x487c4162; WORD $0x0410; BYTE $0x09 + +skipNext8: + QUAD $0x7162c4fe484d5162; QUAD $0x482df162cb6f487e; QUAD $0x724825f16206c372; QUAD $0xc372481df1620bc3; QUAD $0xcacd25485d736219; QUAD $0x5362c1fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d0fe486dd162c2; QUAD $0xf16202c772484df1; QUAD $0x1df1620dc7724825; QUAD $0x487e716216c77248; QUAD $0xc925487d7362cf6f; QUAD $0x96f4254825d362e8; QUAD $0xd162f1fe484dd162; QUAD $0x487e7162f0fe484d; WORD $0x626f; BYTE $0x3a + TESTQ $(1<<9), R14 + JE skipNext9 + MOVQ 9*24(AX), R9 + LONG $0x487c4162; WORD $0x0c10; BYTE $0x09 + +skipNext9: + QUAD $0x7162c4fe48555162; QUAD $0x482df162ca6f487e; QUAD $0x724825f16206c272; QUAD $0xc272481df1620bc2; QUAD $0xcacc254865736219; QUAD $0x5362c2fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c8fe4875d162c2; QUAD $0xf16202c6724855f1; QUAD $0x1df1620dc6724825; QUAD $0x487e716216c67248; QUAD $0xc82548457362ce6f; QUAD $0x96ec254825d362e8; QUAD $0xd162e9fe4855d162; QUAD $0x487e7162e8fe4855; WORD $0x626f; BYTE $0x3b + TESTQ $(1<<10), R14 + JE skipNext10 + MOVQ 10*24(AX), R9 + LONG $0x487c4162; WORD $0x1410; BYTE $0x09 + +skipNext10: + QUAD $0x7162c4fe485d5162; QUAD $0x482df162c96f487e; QUAD $0x724825f16206c172; QUAD $0xc172481df1620bc1; QUAD $0xcacb25486d736219; QUAD $0x5362c3fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62c0fe487dd162c2; QUAD $0xf16202c572485df1; QUAD $0x1df1620dc5724825; QUAD $0x487e716216c57248; QUAD $0xcf25484d7362cd6f; QUAD $0x96e4254825d362e8; QUAD $0xd162e1fe485dd162; QUAD $0x487e7162e0fe485d; WORD $0x626f; BYTE $0x3c + TESTQ $(1<<11), R14 + JE skipNext11 + MOVQ 11*24(AX), R9 + LONG $0x487c4162; WORD $0x1c10; BYTE $0x09 + +skipNext11: + QUAD $0x7162c4fe48655162; QUAD $0x482df162c86f487e; QUAD $0x724825f16206c072; QUAD $0xc072481df1620bc0; QUAD $0xcaca254875736219; QUAD 
$0x5362c4fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f8fe4845d162c2; QUAD $0xf16202c4724865f1; QUAD $0x1df1620dc4724825; QUAD $0x487e716216c47248; QUAD $0xce2548557362cc6f; QUAD $0x96dc254825d362e8; QUAD $0xd162d9fe4865d162; QUAD $0x487e7162d8fe4865; WORD $0x626f; BYTE $0x3d + TESTQ $(1<<12), R14 + JE skipNext12 + MOVQ 12*24(AX), R9 + LONG $0x487c4162; WORD $0x2410; BYTE $0x09 + +skipNext12: + QUAD $0x7162c4fe486d5162; QUAD $0x482df162cf6f487e; QUAD $0x724825f16206c772; QUAD $0xc772481df1620bc7; QUAD $0xcac925487d736219; QUAD $0x5362c5fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62f0fe484dd162c2; QUAD $0xf16202c372486df1; QUAD $0x1df1620dc3724825; QUAD $0x487e716216c37248; QUAD $0xcd25485d7362cb6f; QUAD $0x96d4254825d362e8; QUAD $0xd162d1fe486dd162; QUAD $0x487e7162d0fe486d; WORD $0x626f; BYTE $0x3e + TESTQ $(1<<13), R14 + JE skipNext13 + MOVQ 13*24(AX), R9 + LONG $0x487c4162; WORD $0x2c10; BYTE $0x09 + +skipNext13: + QUAD $0x7162c4fe48755162; QUAD $0x482df162ce6f487e; QUAD $0x724825f16206c672; QUAD $0xc672481df1620bc6; QUAD $0xcac8254845736219; QUAD $0x5362c6fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e8fe4855d162c2; QUAD $0xf16202c2724875f1; QUAD $0x1df1620dc2724825; QUAD $0x487e716216c27248; QUAD $0xcc2548657362ca6f; QUAD $0x96cc254825d362e8; QUAD $0xd162c9fe4875d162; QUAD $0x487e7162c8fe4875; WORD $0x626f; BYTE $0x3f + TESTQ $(1<<14), R14 + JE skipNext14 + MOVQ 14*24(AX), R9 + LONG $0x487c4162; WORD $0x3410; BYTE $0x09 + +skipNext14: + QUAD $0x7162c4fe487d5162; QUAD $0x482df162cd6f487e; QUAD $0x724825f16206c572; QUAD $0xc572481df1620bc5; QUAD $0xcacf25484d736219; QUAD $0x5362c7fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62e0fe485dd162c2; QUAD $0xf16202c172487df1; QUAD $0x1df1620dc1724825; QUAD $0x487e716216c17248; QUAD $0xcb25486d7362c96f; QUAD $0x96c4254825d362e8; QUAD $0xd162c1fe487dd162; QUAD $0x487e7162c0fe487d; WORD $0x626f; BYTE $0x40 + TESTQ 
$(1<<15), R14 + JE skipNext15 + MOVQ 15*24(AX), R9 + LONG $0x487c4162; WORD $0x3c10; BYTE $0x09 + +skipNext15: + QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; QUAD $0x247c6f487ed162df; QUAD $0xc4fbfe4945d16207; LONG $0xce92fbc1 + JMP lloop + +lastLoop: + QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d3162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516231626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d3162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x516232626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x3162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d516233; QUAD 
$0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d3162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x4865516234626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d3162caca2548; QUAD $0xd42548255362c4fe; QUAD $0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x6235626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d31; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623662; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d3162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d516237626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d3162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; 
QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x38626f487e7162c0; QUAD $0x7162c4fe48455162; QUAD $0x482df162cc6f487e; QUAD $0x724825f16206c472; QUAD $0xc472481df1620bc4; QUAD $0xcace254855736219; QUAD $0x5362c0fe483d1162; QUAD $0x3d516296d4254825; QUAD $0xfe483d5162c1fe48; QUAD $0x62d8fe4865d162c2; QUAD $0xf16202c0724845f1; QUAD $0x1df1620dc0724825; QUAD $0x487e716216c07248; QUAD $0xca2548757362c86f; QUAD $0x96fc254825d362e8; QUAD $0xd162f9fe4845d162; QUAD $0x487e7162f8fe4845; QUAD $0xfe484d516239626f; QUAD $0x62cb6f487e7162c4; QUAD $0xf16206c372482df1; QUAD $0x1df1620bc3724825; QUAD $0x485d736219c37248; QUAD $0xfe483d1162cacd25; QUAD $0x96d42548255362c1; QUAD $0x5162c1fe483d5162; QUAD $0x486dd162c2fe483d; QUAD $0xc772484df162d0fe; QUAD $0x0dc7724825f16202; QUAD $0x6216c772481df162; QUAD $0x7d7362cf6f487e71; QUAD $0x4825d362e8c92548; QUAD $0xfe484dd16296f425; QUAD $0x62f0fe484dd162f1; QUAD $0x51623a626f487e71; QUAD $0x487e7162c4fe4855; QUAD $0xc272482df162ca6f; QUAD $0x0bc2724825f16206; QUAD $0x6219c272481df162; QUAD $0x1162cacc25486573; QUAD $0x48255362c2fe483d; QUAD $0xfe483d516296d425; QUAD $0x62c2fe483d5162c1; QUAD $0x55f162c8fe4875d1; QUAD $0x4825f16202c67248; QUAD $0x72481df1620dc672; QUAD $0xce6f487e716216c6; QUAD $0x62e8c82548457362; QUAD $0xd16296ec254825d3; QUAD $0x4855d162e9fe4855; QUAD $0x626f487e7162e8fe; QUAD $0x62c4fe485d51623b; QUAD $0x2df162c96f487e71; QUAD $0x4825f16206c17248; QUAD $0x72481df1620bc172; QUAD $0xcb25486d736219c1; QUAD $0x62c3fe483d1162ca; QUAD $0x516296d425482553; QUAD $0x483d5162c1fe483d; QUAD $0xc0fe487dd162c2fe; QUAD $0x6202c572485df162; QUAD $0xf1620dc5724825f1; QUAD $0x7e716216c572481d; QUAD $0x25484d7362cd6f48; QUAD $0xe4254825d362e8cf; QUAD $0x62e1fe485dd16296; QUAD $0x7e7162e0fe485dd1; QUAD $0x486551623c626f48; QUAD $0xc86f487e7162c4fe; QUAD $0x6206c072482df162; QUAD $0xf1620bc0724825f1; QUAD $0x75736219c072481d; QUAD $0x483d1162caca2548; QUAD $0xd42548255362c4fe; QUAD 
$0x62c1fe483d516296; QUAD $0x45d162c2fe483d51; QUAD $0x724865f162f8fe48; QUAD $0xc4724825f16202c4; QUAD $0x16c472481df1620d; QUAD $0x7362cc6f487e7162; QUAD $0x25d362e8ce254855; QUAD $0x4865d16296dc2548; QUAD $0xd8fe4865d162d9fe; QUAD $0x623d626f487e7162; QUAD $0x7e7162c4fe486d51; QUAD $0x72482df162cf6f48; QUAD $0xc7724825f16206c7; QUAD $0x19c772481df1620b; QUAD $0x62cac925487d7362; QUAD $0x255362c5fe483d11; QUAD $0x483d516296d42548; QUAD $0xc2fe483d5162c1fe; QUAD $0xf162f0fe484dd162; QUAD $0x25f16202c372486d; QUAD $0x481df1620dc37248; QUAD $0x6f487e716216c372; QUAD $0xe8cd25485d7362cb; QUAD $0x6296d4254825d362; QUAD $0x6dd162d1fe486dd1; QUAD $0x6f487e7162d0fe48; QUAD $0xc4fe487551623e62; QUAD $0xf162ce6f487e7162; QUAD $0x25f16206c672482d; QUAD $0x481df1620bc67248; QUAD $0x254845736219c672; QUAD $0xc6fe483d1162cac8; QUAD $0x6296d42548255362; QUAD $0x3d5162c1fe483d51; QUAD $0xfe4855d162c2fe48; QUAD $0x02c2724875f162e8; QUAD $0x620dc2724825f162; QUAD $0x716216c272481df1; QUAD $0x48657362ca6f487e; QUAD $0x254825d362e8cc25; QUAD $0xc9fe4875d16296cc; QUAD $0x7162c8fe4875d162; QUAD $0x7d51623f626f487e; QUAD $0x6f487e7162c4fe48; QUAD $0x06c572482df162cd; QUAD $0x620bc5724825f162; QUAD $0x736219c572481df1; QUAD $0x3d1162cacf25484d; QUAD $0x2548255362c7fe48; QUAD $0xc1fe483d516296d4; QUAD $0xd162c2fe483d5162; QUAD $0x487df162e0fe485d; QUAD $0x724825f16202c172; QUAD $0xc172481df1620dc1; QUAD $0x62c96f487e716216; QUAD $0xd362e8cb25486d73; QUAD $0x7dd16296c4254825; QUAD $0xfe487dd162c1fe48; QUAD $0x40626f487e7162c0; QUAD $0xd162d86f487e7162; QUAD $0x7dd16224046f487e; QUAD $0x6f487e7162c3fe49; QUAD $0x244c6f487ed162d9; QUAD $0x62cbfe4975d16201; QUAD $0x7ed162da6f487e71; QUAD $0x6dd1620224546f48; QUAD $0x6f487e7162d3fe49; QUAD $0x245c6f487ed162db; QUAD $0x62dbfe4965d16203; QUAD $0x7ed162dc6f487e71; QUAD $0x5dd1620424646f48; QUAD $0x6f487e7162e3fe49; QUAD $0x246c6f487ed162dd; QUAD $0x62ebfe4955d16205; QUAD $0x7ed162de6f487e71; QUAD $0x4dd1620624746f48; QUAD $0x6f487e7162f3fe49; 
QUAD $0x247c6f487ed162df; QUAD $0x62fbfe4945d16207; QUAD $0x7ef162077f487ef1; QUAD $0x487ef162014f7f48; QUAD $0x7f487ef16202577f; QUAD $0x677f487ef162035f; QUAD $0x056f7f487ef16204; QUAD $0x6206777f487ef162; LONG $0x7f487ef1; WORD $0x077f + VZEROUPPER + RET + +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x000(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x008(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x010(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x018(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x020(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x028(SB)/8, $0x0c0d0e0f08090a0b +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x030(SB)/8, $0x0405060700010203 +DATA PSHUFFLE_BYTE_FLIP_MASK<>+0x038(SB)/8, $0x0c0d0e0f08090a0b +GLOBL PSHUFFLE_BYTE_FLIP_MASK<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x000(SB)/8, $0x0000000000000000 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x008(SB)/8, $0x0000000000000001 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x010(SB)/8, $0x0000000000000008 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x018(SB)/8, $0x0000000000000009 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x020(SB)/8, $0x0000000000000004 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x028(SB)/8, $0x0000000000000005 +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x030(SB)/8, $0x000000000000000C +DATA PSHUFFLE_TRANSPOSE16_MASK1<>+0x038(SB)/8, $0x000000000000000D +GLOBL PSHUFFLE_TRANSPOSE16_MASK1<>(SB), 8, $64 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x000(SB)/8, $0x0000000000000002 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x008(SB)/8, $0x0000000000000003 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x010(SB)/8, $0x000000000000000A +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x018(SB)/8, $0x000000000000000B +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x020(SB)/8, $0x0000000000000006 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x028(SB)/8, $0x0000000000000007 +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x030(SB)/8, $0x000000000000000E +DATA PSHUFFLE_TRANSPOSE16_MASK2<>+0x038(SB)/8, $0x000000000000000F +GLOBL 
PSHUFFLE_TRANSPOSE16_MASK2<>(SB), 8, $64 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go new file mode 100644 index 000000000..eb8a0ff0c --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s new file mode 100644 index 000000000..9f444d49f --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockAvx_amd64.s @@ -0,0 +1,408 @@ +//+build !noasm,!appengine + +// SHA256 implementation for AVX + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +#include "textflag.h" + +#define ROTATE_XS \ + MOVOU X4, X15 \ + MOVOU X5, X4 \ + MOVOU X6, X5 \ + MOVOU X7, X6 \ + MOVOU X15, X7 + +// compute s0 four at a time and s1 two at a time +// compute W[-16] + W[-7] 4 at a time +#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + LONG $0x0f41e3c4; WORD $0x04c6 \ // VPALIGNR XMM0,XMM7,XMM6,0x4 /* XTMP0 = W[-7] */ + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL f, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + XORL g, R15 \ // y2 = f^g + LONG $0xc4fef9c5 \ // VPADDD XMM0,XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) + ANDL e, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + \ + \ // compute s0 + \ + LONG $0x0f51e3c4; WORD $0x04cc \ // VPALIGNR XMM1,XMM5,XMM4,0x4 /* XTMP1 = W[-15] */ + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + \ // ROTATE_ARGS + MOVL a, R15 \ // y2 = a + LONG $0xd172e9c5; BYTE 
$0x07 \ // VPSRLD XMM2,XMM1,0x7 /* */ + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + LONG $0xf172e1c5; BYTE $0x19 \ // VPSLLD XMM3,XMM1,0x19 /* */ + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + LONG $0xdaebe1c5 \ // VPOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL d, R13 \ // y0 = e + MOVL h, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL d, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL e, R15 \ // y2 = f + ROLL $23, R14 \ // y1 = a >> (22-13) + LONG $0xd172e9c5; BYTE $0x12 \ // VPSRLD XMM2,XMM1,0x12 /* */ + XORL h, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL f, R15 \ // y2 = f^g + LONG $0xd172b9c5; BYTE $0x03 \ // VPSRLD XMM8,XMM1,0x3 /* XTMP4 = W[-15] >> 3 */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL d, R15 \ // y2 = (f^g)&e + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xf172f1c5; BYTE $0x0e \ // VPSLLD XMM1,XMM1,0xe /* */ + XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL f, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd9efe1c5 \ // VPXOR XMM3,XMM3,XMM1 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + LONG $0xdaefe1c5 \ // VPXOR XMM3,XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ + MOVL h, R13 \ // y0 = a + ADDL R15, g \ // h = h + S1 + CH + k + w + MOVL h, R15 \ // y2 = a + LONG $0xef61c1c4; BYTE $0xc8 \ // VPXOR XMM1,XMM3,XMM8 /* XTMP1 = s0 */ + ORL b, R13 \ // y0 = a|c + ADDL g, c \ // d = d + h + S1 + CH + k + w + ANDL b, R15 \ // y2 = a&c + \ + \ // compute low s1 + \ + LONG $0xd770f9c5; BYTE $0xfa \ // VPSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ + ANDL a, R13 \ 
// y0 = (a|c)&b + ADDL R14, g \ // h = h + S1 + CH + k + w + S0 + LONG $0xc1fef9c5 \ // VPADDD XMM0,XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL c, R13 \ // y0 = e + MOVL g, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL c, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL d, R15 \ // y2 = f + XORL g, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + LONG $0xd272b9c5; BYTE $0x0a \ // VPSRLD XMM8,XMM2,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ + XORL e, R15 \ // y2 = f^g + LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ + XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL c, R15 \ // y2 = (f^g)&e + LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL e, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xc2ef39c5 \ // VPXOR XMM8,XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ + MOVL g, R13 \ // y0 = a + ADDL R15, f \ // h = h + S1 + CH + k + w + MOVL g, R15 \ // y2 = a + LONG $0x003942c4; BYTE $0xc2 \ // VPSHUFB XMM8,XMM8,XMM10 /* XTMP4 = s1 {00BA} */ + ORL a, R13 \ // y0 = a|c + ADDL f, b \ // d = d + h + S1 + CH + k + w + ANDL a, R15 \ // y2 = a&c + LONG $0xfe79c1c4; BYTE $0xc0 \ // VPADDD XMM0,XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ + ANDL h, R13 \ // y0 = (a|c)&b + ADDL R14, f \ // h = h + S1 + CH + k + w + S0 + \ + \ // compute high s1 + \ + LONG $0xd070f9c5; BYTE $0x50 \ // VPSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] 
{DDCC} */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL b, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL f, R14 \ // y1 = a + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL b, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL c, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + LONG $0xd272a1c5; BYTE $0x0a \ // VPSRLD XMM11,XMM2,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ + XORL f, R14 \ // y1 = a ^ (a >> (22-13) + XORL d, R15 \ // y2 = f^g + LONG $0xd273e1c5; BYTE $0x13 \ // VPSRLQ XMM3,XMM2,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ + XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL b, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + LONG $0xd273e9c5; BYTE $0x11 \ // VPSRLQ XMM2,XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ + XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL d, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd3efe9c5 \ // VPXOR XMM2,XMM2,XMM3 /* */ + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xdaef21c5 \ // VPXOR XMM11,XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ + MOVL f, R13 \ // y0 = a + ADDL R15, e \ // h = h + S1 + CH + k + w + MOVL f, R15 \ // y2 = a + LONG $0x002142c4; BYTE $0xdc \ // VPSHUFB XMM11,XMM11,XMM12 /* XTMP5 = s1 {DC00} */ + ORL h, R13 \ // y0 = a|c + ADDL e, a \ // d = d + h + S1 + CH + k + w + ANDL h, R15 \ // y2 = a&c + LONG $0xe0fea1c5 \ // VPADDD XMM4,XMM11,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ + ANDL g, R13 \ // y0 = (a|c)&b + ADDL R14, e \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + ROTATE_XS + +#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + 
MOVL a, R14 \ // y1 = a + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL f, R15 \ // y2 = f + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL g, R15 \ // y2 = f^g + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + ANDL e, R15 \ // y2 = (f^g)&e + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + MOVL a, R15 \ // y2 = a + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ + +// func blockAvx(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) +TEXT ·blockAvx(SB), 7, $0-80 + + MOVQ h+0(FP), SI // SI: &h + MOVQ message_base+24(FP), R8 // &message + MOVQ message_len+32(FP), R9 // length of message + CMPQ R9, $0 + JEQ done_hash + ADDQ R8, R9 + MOVQ R9, reserved2+64(FP) // store end of message + + // Register definition + // a --> eax + // b --> ebx + // c --> ecx + // d --> r8d + // e --> edx + // f --> r9d + // g --> r10d + // h --> r11d + // + // y0 --> r13d + // y1 --> r14d + // y2 --> r15d + + MOVL (0*4)(SI), AX // a = H0 + MOVL (1*4)(SI), BX // b = H1 + MOVL (2*4)(SI), CX // c = H2 + MOVL (3*4)(SI), R8 // d = H3 + MOVL (4*4)(SI), DX // e = H4 + MOVL (5*4)(SI), R9 // f = H5 + MOVL (6*4)(SI), R10 // g = H6 + MOVL (7*4)(SI), R11 // h = H7 + + MOVOU bflipMask<>(SB), X13 + MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA + MOVOU shufDC00<>(SB), X12 
// shuffle xDxC -> DC00 + + MOVQ message_base+24(FP), SI // SI: &message + +loop0: + LEAQ constants<>(SB), BP + + // byte swap first 16 dwords + MOVOU 0*16(SI), X4 + LONG $0x0059c2c4; BYTE $0xe5 // VPSHUFB XMM4, XMM4, XMM13 + MOVOU 1*16(SI), X5 + LONG $0x0051c2c4; BYTE $0xed // VPSHUFB XMM5, XMM5, XMM13 + MOVOU 2*16(SI), X6 + LONG $0x0049c2c4; BYTE $0xf5 // VPSHUFB XMM6, XMM6, XMM13 + MOVOU 3*16(SI), X7 + LONG $0x0041c2c4; BYTE $0xfd // VPSHUFB XMM7, XMM7, XMM13 + + MOVQ SI, reserved3+72(FP) + MOVD $0x3, DI + + // schedule 48 input dwords, by doing 3 rounds of 16 each +loop1: + LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + LONG $0x4dfe59c5; BYTE $0x10 // VPADDD XMM9, XMM4, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + LONG $0x4dfe59c5; BYTE $0x20 // VPADDD XMM9, XMM4, 32[RBP] /* Add 3rd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + LONG $0x4dfe59c5; BYTE $0x30 // VPADDD XMM9, XMM4, 48[RBP] /* Add 4th constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $64, BP + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + SUBQ $1, DI + JNE loop1 + + MOVD $0x2, DI + +loop2: + LONG $0x4dfe59c5; BYTE $0x00 // VPADDD XMM9, XMM4, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) + DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) + DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) + DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) + + LONG $0x4dfe51c5; BYTE $0x10 // VPADDD XMM9, XMM5, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $32, BP + DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) + DO_ROUND( R8, DX, R9, R10, R11, AX, BX, CX, 52) + DO_ROUND( CX, R8, 
DX, R9, R10, R11, AX, BX, 56) + DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) + + MOVOU X6, X4 + MOVOU X7, X5 + + SUBQ $1, DI + JNE loop2 + + MOVQ h+0(FP), SI // SI: &h + ADDL (0*4)(SI), AX // H0 = a + H0 + MOVL AX, (0*4)(SI) + ADDL (1*4)(SI), BX // H1 = b + H1 + MOVL BX, (1*4)(SI) + ADDL (2*4)(SI), CX // H2 = c + H2 + MOVL CX, (2*4)(SI) + ADDL (3*4)(SI), R8 // H3 = d + H3 + MOVL R8, (3*4)(SI) + ADDL (4*4)(SI), DX // H4 = e + H4 + MOVL DX, (4*4)(SI) + ADDL (5*4)(SI), R9 // H5 = f + H5 + MOVL R9, (5*4)(SI) + ADDL (6*4)(SI), R10 // H6 = g + H6 + MOVL R10, (6*4)(SI) + ADDL (7*4)(SI), R11 // H7 = h + H7 + MOVL R11, (7*4)(SI) + + MOVQ reserved3+72(FP), SI + ADDQ $64, SI + CMPQ reserved2+64(FP), SI + JNE loop0 + +done_hash: + RET + +// Constants table +DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 +DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b +DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 +DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be +DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 +DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 +DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 +DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc +DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 +DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e +DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 +DATA constants<>+0xb8(SB)/8, $0x106aa070f40e3585 +DATA constants<>+0xc0(SB)/8, 
$0x1e376c0819a4c116 +DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c +DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee +DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 +DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa +DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 +DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b + +DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 +DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 + +GLOBL constants<>(SB), 8, $256 +GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 +GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 +GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go new file mode 100644 index 000000000..383189c8c --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.go @@ -0,0 +1,6 @@ +//+build !noasm + +package sha256 + +//go:noescape +func blockSha(h *[8]uint32, message []uint8) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s new file mode 100644 index 000000000..909fc0ef8 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSha_amd64.s @@ -0,0 +1,266 @@ +//+build !noasm,!appengine + +// SHA intrinsic version of SHA256 + +// Kristofer Peterson, (C) 2018. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#include "textflag.h" + +DATA K<>+0x00(SB)/4, $0x428a2f98 +DATA K<>+0x04(SB)/4, $0x71374491 +DATA K<>+0x08(SB)/4, $0xb5c0fbcf +DATA K<>+0x0c(SB)/4, $0xe9b5dba5 +DATA K<>+0x10(SB)/4, $0x3956c25b +DATA K<>+0x14(SB)/4, $0x59f111f1 +DATA K<>+0x18(SB)/4, $0x923f82a4 +DATA K<>+0x1c(SB)/4, $0xab1c5ed5 +DATA K<>+0x20(SB)/4, $0xd807aa98 +DATA K<>+0x24(SB)/4, $0x12835b01 +DATA K<>+0x28(SB)/4, $0x243185be +DATA K<>+0x2c(SB)/4, $0x550c7dc3 +DATA K<>+0x30(SB)/4, $0x72be5d74 +DATA K<>+0x34(SB)/4, $0x80deb1fe +DATA K<>+0x38(SB)/4, $0x9bdc06a7 +DATA K<>+0x3c(SB)/4, $0xc19bf174 +DATA K<>+0x40(SB)/4, $0xe49b69c1 +DATA K<>+0x44(SB)/4, $0xefbe4786 +DATA K<>+0x48(SB)/4, $0x0fc19dc6 +DATA K<>+0x4c(SB)/4, $0x240ca1cc +DATA K<>+0x50(SB)/4, $0x2de92c6f +DATA K<>+0x54(SB)/4, $0x4a7484aa +DATA K<>+0x58(SB)/4, $0x5cb0a9dc +DATA K<>+0x5c(SB)/4, $0x76f988da +DATA K<>+0x60(SB)/4, $0x983e5152 +DATA K<>+0x64(SB)/4, $0xa831c66d +DATA K<>+0x68(SB)/4, $0xb00327c8 +DATA K<>+0x6c(SB)/4, $0xbf597fc7 +DATA K<>+0x70(SB)/4, $0xc6e00bf3 +DATA K<>+0x74(SB)/4, $0xd5a79147 +DATA K<>+0x78(SB)/4, $0x06ca6351 +DATA K<>+0x7c(SB)/4, $0x14292967 +DATA K<>+0x80(SB)/4, $0x27b70a85 +DATA K<>+0x84(SB)/4, $0x2e1b2138 +DATA K<>+0x88(SB)/4, $0x4d2c6dfc +DATA K<>+0x8c(SB)/4, $0x53380d13 +DATA K<>+0x90(SB)/4, $0x650a7354 +DATA K<>+0x94(SB)/4, $0x766a0abb +DATA K<>+0x98(SB)/4, $0x81c2c92e +DATA K<>+0x9c(SB)/4, $0x92722c85 +DATA K<>+0xa0(SB)/4, $0xa2bfe8a1 +DATA K<>+0xa4(SB)/4, $0xa81a664b +DATA K<>+0xa8(SB)/4, $0xc24b8b70 +DATA K<>+0xac(SB)/4, $0xc76c51a3 +DATA K<>+0xb0(SB)/4, $0xd192e819 +DATA 
K<>+0xb4(SB)/4, $0xd6990624 +DATA K<>+0xb8(SB)/4, $0xf40e3585 +DATA K<>+0xbc(SB)/4, $0x106aa070 +DATA K<>+0xc0(SB)/4, $0x19a4c116 +DATA K<>+0xc4(SB)/4, $0x1e376c08 +DATA K<>+0xc8(SB)/4, $0x2748774c +DATA K<>+0xcc(SB)/4, $0x34b0bcb5 +DATA K<>+0xd0(SB)/4, $0x391c0cb3 +DATA K<>+0xd4(SB)/4, $0x4ed8aa4a +DATA K<>+0xd8(SB)/4, $0x5b9cca4f +DATA K<>+0xdc(SB)/4, $0x682e6ff3 +DATA K<>+0xe0(SB)/4, $0x748f82ee +DATA K<>+0xe4(SB)/4, $0x78a5636f +DATA K<>+0xe8(SB)/4, $0x84c87814 +DATA K<>+0xec(SB)/4, $0x8cc70208 +DATA K<>+0xf0(SB)/4, $0x90befffa +DATA K<>+0xf4(SB)/4, $0xa4506ceb +DATA K<>+0xf8(SB)/4, $0xbef9a3f7 +DATA K<>+0xfc(SB)/4, $0xc67178f2 +GLOBL K<>(SB), RODATA|NOPTR, $256 + +DATA SHUF_MASK<>+0x00(SB)/8, $0x0405060700010203 +DATA SHUF_MASK<>+0x08(SB)/8, $0x0c0d0e0f08090a0b +GLOBL SHUF_MASK<>(SB), RODATA|NOPTR, $16 + +// Register Usage +// BX base address of constant table (constant) +// DX hash_state (constant) +// SI hash_data.data +// DI hash_data.data + hash_data.length - 64 (constant) +// X0 scratch +// X1 scratch +// X2 working hash state // ABEF +// X3 working hash state // CDGH +// X4 first 16 bytes of block +// X5 second 16 bytes of block +// X6 third 16 bytes of block +// X7 fourth 16 bytes of block +// X12 saved hash state // ABEF +// X13 saved hash state // CDGH +// X15 data shuffle mask (constant) + +TEXT ·blockSha(SB), NOSPLIT, $0-32 + MOVQ h+0(FP), DX + MOVQ message_base+8(FP), SI + MOVQ message_len+16(FP), DI + LEAQ -64(SI)(DI*1), DI + MOVOU (DX), X2 + MOVOU 16(DX), X1 + MOVO X2, X3 + PUNPCKLLQ X1, X2 + PUNPCKHLQ X1, X3 + PSHUFD $0x27, X2, X2 + PSHUFD $0x27, X3, X3 + MOVO SHUF_MASK<>(SB), X15 + LEAQ K<>(SB), BX + + JMP TEST + +LOOP: + MOVO X2, X12 + MOVO X3, X13 + + // load block and shuffle + MOVOU (SI), X4 + MOVOU 16(SI), X5 + MOVOU 32(SI), X6 + MOVOU 48(SI), X7 + PSHUFB X15, X4 + PSHUFB X15, X5 + PSHUFB X15, X6 + PSHUFB X15, X7 + +#define ROUND456 \ + PADDL X5, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 \ + LONG $0x0f3a0f66; WORD 
$0x04cc \ // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 \ + LONG $0xf5cd380f \ // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + +#define ROUND567 \ + PADDL X6, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 \ + LONG $0x0f3a0f66; WORD $0x04cd \ // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 \ + LONG $0xfecd380f \ // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + +#define ROUND674 \ + PADDL X7, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X7, X1 \ + LONG $0x0f3a0f66; WORD $0x04ce \ // PALIGNR XMM1, XMM6, 4 + PADDL X1, X4 \ + LONG $0xe7cd380f \ // SHA256MSG2 XMM4, XMM7 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xf7cc380f // SHA256MSG1 XMM6, XMM7 + +#define ROUND745 \ + PADDL X4, X0 \ + LONG $0xdacb380f \ // SHA256RNDS2 XMM3, XMM2 + MOVO X4, X1 \ + LONG $0x0f3a0f66; WORD $0x04cf \ // PALIGNR XMM1, XMM7, 4 + PADDL X1, X5 \ + LONG $0xeccd380f \ // SHA256MSG2 XMM5, XMM4 + PSHUFD $0x4e, X0, X0 \ + LONG $0xd3cb380f \ // SHA256RNDS2 XMM2, XMM3 + LONG $0xfccc380f // SHA256MSG1 XMM7, XMM4 + + // rounds 0-3 + MOVO (BX), X0 + PADDL X4, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 4-7 + MOVO 1*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xe5cc380f // SHA256MSG1 XMM4, XMM5 + + // rounds 8-11 + MOVO 2*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + LONG $0xeecc380f // SHA256MSG1 XMM5, XMM6 + + MOVO 3*16(BX), X0; ROUND674 // rounds 12-15 + MOVO 4*16(BX), X0; ROUND745 // rounds 16-19 + MOVO 5*16(BX), X0; ROUND456 // rounds 20-23 + MOVO 6*16(BX), X0; ROUND567 // rounds 
24-27 + MOVO 7*16(BX), X0; ROUND674 // rounds 28-31 + MOVO 8*16(BX), X0; ROUND745 // rounds 32-35 + MOVO 9*16(BX), X0; ROUND456 // rounds 36-39 + MOVO 10*16(BX), X0; ROUND567 // rounds 40-43 + MOVO 11*16(BX), X0; ROUND674 // rounds 44-47 + MOVO 12*16(BX), X0; ROUND745 // rounds 48-51 + + // rounds 52-55 + MOVO 13*16(BX), X0 + PADDL X5, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X5, X1 + LONG $0x0f3a0f66; WORD $0x04cc // PALIGNR XMM1, XMM4, 4 + PADDL X1, X6 + LONG $0xf5cd380f // SHA256MSG2 XMM6, XMM5 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 56-59 + MOVO 14*16(BX), X0 + PADDL X6, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + MOVO X6, X1 + LONG $0x0f3a0f66; WORD $0x04cd // PALIGNR XMM1, XMM5, 4 + PADDL X1, X7 + LONG $0xfecd380f // SHA256MSG2 XMM7, XMM6 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + // rounds 60-63 + MOVO 15*16(BX), X0 + PADDL X7, X0 + LONG $0xdacb380f // SHA256RNDS2 XMM3, XMM2 + PSHUFD $0x4e, X0, X0 + LONG $0xd3cb380f // SHA256RNDS2 XMM2, XMM3 + + PADDL X12, X2 + PADDL X13, X3 + + ADDQ $64, SI + +TEST: + CMPQ SI, DI + JBE LOOP + + PSHUFD $0x4e, X3, X0 + LONG $0x0e3a0f66; WORD $0xf0c2 // PBLENDW XMM0, XMM2, 0xf0 + PSHUFD $0x4e, X2, X1 + LONG $0x0e3a0f66; WORD $0x0fcb // PBLENDW XMM1, XMM3, 0x0f + PSHUFD $0x1b, X0, X0 + PSHUFD $0x1b, X1, X1 + + MOVOU X0, (DX) + MOVOU X1, 16(DX) + + RET diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go new file mode 100644 index 000000000..54abbb0f0 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.go @@ -0,0 +1,22 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha256 + +//go:noescape +func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) diff --git a/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s new file mode 100644 index 000000000..7afb45c87 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256blockSsse_amd64.s @@ -0,0 +1,429 @@ +//+build !noasm,!appengine + +// SHA256 implementation for SSSE3 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// This code is based on an Intel White-Paper: +// "Fast SHA-256 Implementations on Intel Architecture Processors" +// +// together with the reference implementation from the following authors: +// James Guilford +// Kirk Yap +// Tim Chen +// +// For Golang it has been converted to Plan 9 assembly with the help of +// github.com/minio/asm2plan9s to assemble Intel instructions to their Plan9 +// equivalents +// + +#include "textflag.h" + +#define ROTATE_XS \ + MOVOU X4, X15 \ + MOVOU X5, X4 \ + MOVOU X6, X5 \ + MOVOU X7, X6 \ + MOVOU X15, X7 + +// compute s0 four at a time and s1 two at a time +// compute W[-16] + W[-7] 4 at a time +#define FOUR_ROUNDS_AND_SCHED(a, b, c, d, e, f, g, h) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + MOVOU X7, X0 \ + LONG $0x0f3a0f66; WORD $0x04c6 \ // PALIGNR XMM0,XMM6,0x4 /* XTMP0 = W[-7] */ + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL e, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL f, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + XORL g, R15 \ // y2 = f^g + LONG $0xc4fe0f66 \ // PADDD XMM0,XMM4 /* XTMP0 = W[-7] + W[-16] */ + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6) ) + ANDL e, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + \ + \ // compute s0 + \ + MOVOU X5, X1 \ + LONG $0x0f3a0f66; WORD $0x04cc \ // PALIGNR XMM1,XMM4,0x4 /* XTMP1 = W[-15] */ + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+48(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + \ // ROTATE_ARGS + MOVL a, R15 \ // y2 = a + MOVOU X1, X2 \ + LONG $0xd2720f66; BYTE $0x07 \ // PSRLD XMM2,0x7 /* */ + ORL c, R13 \ // y0 = a|c + ADDL h, d \ 
// d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + MOVOU X1, X3 \ + LONG $0xf3720f66; BYTE $0x19 \ // PSLLD XMM3,0x19 /* */ + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + LONG $0xdaeb0f66 \ // POR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL d, R13 \ // y0 = e + MOVL h, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL d, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL e, R15 \ // y2 = f + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVOU X1, X2 \ + LONG $0xd2720f66; BYTE $0x12 \ // PSRLD XMM2,0x12 /* */ + XORL h, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL f, R15 \ // y2 = f^g + MOVOU X1, X8 \ + LONG $0x720f4166; WORD $0x03d0 \ // PSRLD XMM8,0x3 /* XTMP4 = W[-15] >> 3 */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL d, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL d, R15 \ // y2 = (f^g)&e + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xf1720f66; BYTE $0x0e \ // PSLLD XMM1,0xe /* */ + XORL h, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL f, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd9ef0f66 \ // PXOR XMM3,XMM1 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+52(FP), R15 \ // y2 = k + w + S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + LONG $0xdaef0f66 \ // PXOR XMM3,XMM2 /* XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR */ + MOVL h, R13 \ // y0 = a + ADDL R15, g \ // h = h + S1 + CH + k + w + MOVL h, R15 \ // y2 = a + MOVOU X3, X1 \ + LONG $0xef0f4166; BYTE $0xc8 \ // PXOR XMM1,XMM8 /* XTMP1 = s0 */ + ORL b, R13 \ // y0 = a|c + ADDL g, c \ // d = d + h + S1 + CH + k + w + ANDL b, R15 \ // y2 = a&c + \ + \ // compute low s1 + \ + LONG $0xd7700f66; BYTE $0xfa \ // PSHUFD XMM2,XMM7,0xfa /* XTMP2 = W[-2] {BBAA} */ + ANDL a, R13 \ // y0 = (a|c)&b + ADDL R14, g \ // h = h + S1 + CH + k + w 
+ S0 + LONG $0xc1fe0f66 \ // PADDD XMM0,XMM1 /* XTMP0 = W[-16] + W[-7] + s0 */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, g \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL c, R13 \ // y0 = e + MOVL g, R14 \ // y1 = a + ROLL $18, R13 \ // y0 = e >> (25-11) + XORL c, R13 \ // y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL d, R15 \ // y2 = f + XORL g, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + MOVOU X2, X8 \ + LONG $0x720f4166; WORD $0x0ad0 \ // PSRLD XMM8,0xa /* XTMP4 = W[-2] >> 10 {BBAA} */ + XORL e, R15 \ // y2 = f^g + MOVOU X2, X3 \ + LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xBxA} */ + XORL c, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL c, R15 \ // y2 = (f^g)&e + LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xBxA} */ + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + XORL g, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + XORL e, R15 \ // y2 = CH = ((f^g)&e)^g + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+56(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xef0f4466; BYTE $0xc2 \ // PXOR XMM8,XMM2 /* XTMP4 = s1 {xBxA} */ + MOVL g, R13 \ // y0 = a + ADDL R15, f \ // h = h + S1 + CH + k + w + MOVL g, R15 \ // y2 = a + LONG $0x380f4566; WORD $0xc200 \ // PSHUFB XMM8,XMM10 /* XTMP4 = s1 {00BA} */ + ORL a, R13 \ // y0 = a|c + ADDL f, b \ // d = d + h + S1 + CH + k + w + ANDL a, R15 \ // y2 = a&c + LONG $0xfe0f4166; BYTE $0xc0 \ // PADDD XMM0,XMM8 /* XTMP0 = {..., ..., W[1], W[0]} */ + ANDL h, R13 \ // y0 = (a|c)&b + ADDL R14, f \ // h = h + S1 + CH + k + w + S0 + \ + \ // compute high s1 + \ + LONG $0xd0700f66; BYTE $0x50 \ // PSHUFD XMM2,XMM0,0x50 /* XTMP2 = W[-2] {DDCC} */ + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL 
R13, f \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + MOVL b, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL f, R14 \ // y1 = a + ROLL $23, R14 \ // y1 = a >> (22-13) + XORL b, R13 \ // y0 = e ^ (e >> (25-11)) + MOVL c, R15 \ // y2 = f + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + MOVOU X2, X11 \ + LONG $0x720f4166; WORD $0x0ad3 \ // PSRLD XMM11,0xa /* XTMP5 = W[-2] >> 10 {DDCC} */ + XORL f, R14 \ // y1 = a ^ (a >> (22-13) + XORL d, R15 \ // y2 = f^g + MOVOU X2, X3 \ + LONG $0xd3730f66; BYTE $0x13 \ // PSRLQ XMM3,0x13 /* XTMP3 = W[-2] MY_ROR 19 {xDxC} */ + XORL b, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ANDL b, R15 \ // y2 = (f^g)&e + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + LONG $0xd2730f66; BYTE $0x11 \ // PSRLQ XMM2,0x11 /* XTMP2 = W[-2] MY_ROR 17 {xDxC} */ + XORL f, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL d, R15 \ // y2 = CH = ((f^g)&e)^g + LONG $0xd3ef0f66 \ // PXOR XMM2,XMM3 /* */ + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL R13, R15 \ // y2 = S1 + CH + ADDL _xfer+60(FP), R15 \ // y2 = k + w + S1 + CH + LONG $0xef0f4466; BYTE $0xda \ // PXOR XMM11,XMM2 /* XTMP5 = s1 {xDxC} */ + MOVL f, R13 \ // y0 = a + ADDL R15, e \ // h = h + S1 + CH + k + w + MOVL f, R15 \ // y2 = a + LONG $0x380f4566; WORD $0xdc00 \ // PSHUFB XMM11,XMM12 /* XTMP5 = s1 {DC00} */ + ORL h, R13 \ // y0 = a|c + ADDL e, a \ // d = d + h + S1 + CH + k + w + ANDL h, R15 \ // y2 = a&c + MOVOU X11, X4 \ + LONG $0xe0fe0f66 \ // PADDD XMM4,XMM0 /* X0 = {W[3], W[2], W[1], W[0]} */ + ANDL g, R13 \ // y0 = (a|c)&b + ADDL R14, e \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, e \ // h = h + S1 + CH + k + w + S0 + MAJ + \ // ROTATE_ARGS + ROTATE_XS + +#define DO_ROUND(a, b, c, d, e, f, g, h, offset) \ + MOVL e, R13 \ // y0 = e + ROLL $18, R13 \ // y0 = e >> (25-11) + MOVL a, R14 \ // y1 = a + XORL e, R13 \ 
// y0 = e ^ (e >> (25-11)) + ROLL $23, R14 \ // y1 = a >> (22-13) + MOVL f, R15 \ // y2 = f + XORL a, R14 \ // y1 = a ^ (a >> (22-13) + ROLL $27, R13 \ // y0 = (e >> (11-6)) ^ (e >> (25-6)) + XORL g, R15 \ // y2 = f^g + XORL e, R13 \ // y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ROLL $21, R14 \ // y1 = (a >> (13-2)) ^ (a >> (22-2)) + ANDL e, R15 \ // y2 = (f^g)&e + XORL a, R14 \ // y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ROLL $26, R13 \ // y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + XORL g, R15 \ // y2 = CH = ((f^g)&e)^g + ADDL R13, R15 \ // y2 = S1 + CH + ROLL $30, R14 \ // y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + ADDL _xfer+offset(FP), R15 \ // y2 = k + w + S1 + CH + MOVL a, R13 \ // y0 = a + ADDL R15, h \ // h = h + S1 + CH + k + w + MOVL a, R15 \ // y2 = a + ORL c, R13 \ // y0 = a|c + ADDL h, d \ // d = d + h + S1 + CH + k + w + ANDL c, R15 \ // y2 = a&c + ANDL b, R13 \ // y0 = (a|c)&b + ADDL R14, h \ // h = h + S1 + CH + k + w + S0 + ORL R15, R13 \ // y0 = MAJ = (a|c)&b)|(a&c) + ADDL R13, h // h = h + S1 + CH + k + w + S0 + MAJ + +// func blockSsse(h []uint32, message []uint8, reserved0, reserved1, reserved2, reserved3 uint64) +TEXT ·blockSsse(SB), 7, $0-80 + + MOVQ h+0(FP), SI // SI: &h + MOVQ message_base+24(FP), R8 // &message + MOVQ message_len+32(FP), R9 // length of message + CMPQ R9, $0 + JEQ done_hash + ADDQ R8, R9 + MOVQ R9, reserved2+64(FP) // store end of message + + // Register definition + // a --> eax + // b --> ebx + // c --> ecx + // d --> r8d + // e --> edx + // f --> r9d + // g --> r10d + // h --> r11d + // + // y0 --> r13d + // y1 --> r14d + // y2 --> r15d + + MOVL (0*4)(SI), AX // a = H0 + MOVL (1*4)(SI), BX // b = H1 + MOVL (2*4)(SI), CX // c = H2 + MOVL (3*4)(SI), R8 // d = H3 + MOVL (4*4)(SI), DX // e = H4 + MOVL (5*4)(SI), R9 // f = H5 + MOVL (6*4)(SI), R10 // g = H6 + MOVL (7*4)(SI), R11 // h = H7 + + MOVOU bflipMask<>(SB), X13 + MOVOU shuf00BA<>(SB), X10 // shuffle xBxA -> 00BA + MOVOU shufDC00<>(SB), X12 // shuffle xDxC -> DC00 + + MOVQ 
message_base+24(FP), SI // SI: &message + +loop0: + LEAQ constants<>(SB), BP + + // byte swap first 16 dwords + MOVOU 0*16(SI), X4 + LONG $0x380f4166; WORD $0xe500 // PSHUFB XMM4, XMM13 + MOVOU 1*16(SI), X5 + LONG $0x380f4166; WORD $0xed00 // PSHUFB XMM5, XMM13 + MOVOU 2*16(SI), X6 + LONG $0x380f4166; WORD $0xf500 // PSHUFB XMM6, XMM13 + MOVOU 3*16(SI), X7 + LONG $0x380f4166; WORD $0xfd00 // PSHUFB XMM7, XMM13 + + MOVQ SI, reserved3+72(FP) + MOVD $0x3, DI + + // Align + // nop WORD PTR [rax+rax*1+0x0] + + // schedule 48 input dwords, by doing 3 rounds of 16 each +loop1: + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x204d // PADDD XMM9, 32[RBP] /* Add 3rd constant to message */ + MOVOU X9, reserved0+48(FP) + FOUR_ROUNDS_AND_SCHED(AX, BX, CX, R8, DX, R9, R10, R11) + + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x304d // PADDD XMM9, 48[RBP] /* Add 4th constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $64, BP + FOUR_ROUNDS_AND_SCHED(DX, R9, R10, R11, AX, BX, CX, R8) + + SUBQ $1, DI + JNE loop1 + + MOVD $0x2, DI + +loop2: + MOVOU X4, X9 + LONG $0xfe0f4466; WORD $0x004d // PADDD XMM9, 0[RBP] /* Add 1st constant to first part of message */ + MOVOU X9, reserved0+48(FP) + DO_ROUND( AX, BX, CX, R8, DX, R9, R10, R11, 48) + DO_ROUND(R11, AX, BX, CX, R8, DX, R9, R10, 52) + DO_ROUND(R10, R11, AX, BX, CX, R8, DX, R9, 56) + DO_ROUND( R9, R10, R11, AX, BX, CX, R8, DX, 60) + + MOVOU X5, X9 + LONG $0xfe0f4466; WORD $0x104d // PADDD XMM9, 16[RBP] /* Add 2nd constant to message */ + MOVOU X9, reserved0+48(FP) + ADDQ $32, BP + DO_ROUND( DX, R9, R10, R11, AX, BX, CX, R8, 48) + DO_ROUND( R8, 
DX, R9, R10, R11, AX, BX, CX, 52) + DO_ROUND( CX, R8, DX, R9, R10, R11, AX, BX, 56) + DO_ROUND( BX, CX, R8, DX, R9, R10, R11, AX, 60) + + MOVOU X6, X4 + MOVOU X7, X5 + + SUBQ $1, DI + JNE loop2 + + MOVQ h+0(FP), SI // SI: &h + ADDL (0*4)(SI), AX // H0 = a + H0 + MOVL AX, (0*4)(SI) + ADDL (1*4)(SI), BX // H1 = b + H1 + MOVL BX, (1*4)(SI) + ADDL (2*4)(SI), CX // H2 = c + H2 + MOVL CX, (2*4)(SI) + ADDL (3*4)(SI), R8 // H3 = d + H3 + MOVL R8, (3*4)(SI) + ADDL (4*4)(SI), DX // H4 = e + H4 + MOVL DX, (4*4)(SI) + ADDL (5*4)(SI), R9 // H5 = f + H5 + MOVL R9, (5*4)(SI) + ADDL (6*4)(SI), R10 // H6 = g + H6 + MOVL R10, (6*4)(SI) + ADDL (7*4)(SI), R11 // H7 = h + H7 + MOVL R11, (7*4)(SI) + + MOVQ reserved3+72(FP), SI + ADDQ $64, SI + CMPQ reserved2+64(FP), SI + JNE loop0 + +done_hash: + RET + +// Constants table +DATA constants<>+0x0(SB)/8, $0x71374491428a2f98 +DATA constants<>+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA constants<>+0x10(SB)/8, $0x59f111f13956c25b +DATA constants<>+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA constants<>+0x20(SB)/8, $0x12835b01d807aa98 +DATA constants<>+0x28(SB)/8, $0x550c7dc3243185be +DATA constants<>+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA constants<>+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA constants<>+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA constants<>+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA constants<>+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA constants<>+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA constants<>+0x60(SB)/8, $0xa831c66d983e5152 +DATA constants<>+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA constants<>+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA constants<>+0x78(SB)/8, $0x1429296706ca6351 +DATA constants<>+0x80(SB)/8, $0x2e1b213827b70a85 +DATA constants<>+0x88(SB)/8, $0x53380d134d2c6dfc +DATA constants<>+0x90(SB)/8, $0x766a0abb650a7354 +DATA constants<>+0x98(SB)/8, $0x92722c8581c2c92e +DATA constants<>+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA constants<>+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA constants<>+0xb0(SB)/8, $0xd6990624d192e819 +DATA constants<>+0xb8(SB)/8, 
$0x106aa070f40e3585 +DATA constants<>+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA constants<>+0xc8(SB)/8, $0x34b0bcb52748774c +DATA constants<>+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA constants<>+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA constants<>+0xe0(SB)/8, $0x78a5636f748f82ee +DATA constants<>+0xe8(SB)/8, $0x8cc7020884c87814 +DATA constants<>+0xf0(SB)/8, $0xa4506ceb90befffa +DATA constants<>+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +DATA bflipMask<>+0x00(SB)/8, $0x0405060700010203 +DATA bflipMask<>+0x08(SB)/8, $0x0c0d0e0f08090a0b + +DATA shuf00BA<>+0x00(SB)/8, $0x0b0a090803020100 +DATA shuf00BA<>+0x08(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA shufDC00<>+0x00(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA shufDC00<>+0x08(SB)/8, $0x0b0a090803020100 + +GLOBL constants<>(SB), 8, $256 +GLOBL bflipMask<>(SB), (NOPTR+RODATA), $16 +GLOBL shuf00BA<>(SB), (NOPTR+RODATA), $16 +GLOBL shufDC00<>(SB), (NOPTR+RODATA), $16 diff --git a/vendor/github.com/minio/sha256-simd/sha256block_386.go b/vendor/github.com/minio/sha256-simd/sha256block_386.go new file mode 100644 index 000000000..a4153b918 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_386.go @@ -0,0 +1,25 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockArmGo(dig *digest, p []byte) {} +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_amd64.go b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go new file mode 100644 index 000000000..8d341fcfc --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_amd64.go @@ -0,0 +1,53 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockArmGo(dig *digest, p []byte) {} + +func blockAvxGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockAvx(h[:], p[:], 0, 0, 0, 0) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockAvx2Go(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockAvx2(h[:], p[:]) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockSsseGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockSsse(h[:], p[:], 0, 0, 0, 0) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] +} + +func blockShaGo(dig *digest, p []byte) { + + blockSha(&dig.h, p) +} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm.go b/vendor/github.com/minio/sha256-simd/sha256block_arm.go new file mode 100644 index 000000000..1191c0863 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_arm.go @@ -0,0 +1,25 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} +func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.go b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go new file mode 100644 index 000000000..4441b0c23 --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.go @@ -0,0 +1,37 @@ +//+build !noasm + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} + +//go:noescape +func blockArm(h []uint32, message []uint8) + +func blockArmGo(dig *digest, p []byte) { + + h := []uint32{dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]} + + blockArm(h[:], p[:]) + + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h[0], h[1], h[2], h[3], h[4], + h[5], h[6], h[7] +} diff --git a/vendor/github.com/minio/sha256-simd/sha256block_arm64.s b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s new file mode 100644 index 000000000..db816ac6b --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_arm64.s @@ -0,0 +1,192 @@ +//+build !noasm !appengine + +// ARM64 version of SHA256 + +// +// Minio Cloud Storage, (C) 2016 Minio, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// +// Based on implementation as found in https://github.com/jocover/sha256-armv8 +// +// Use github.com/minio/asm2plan9s on this file to assemble ARM instructions to +// their Plan9 equivalents +// + +TEXT ·blockArm(SB), 7, $0 + MOVD h+0(FP), R0 + MOVD message+24(FP), R1 + MOVD lenmessage+32(FP), R2 // length of message + SUBS $64, R2 + BMI complete + + // Load constants table pointer + MOVD $·constants(SB), R3 + + // Cache constants table in registers v16 - v31 + WORD $0x4cdf2870 // ld1 {v16.4s-v19.4s}, [x3], #64 + WORD $0x4cdf7800 // ld1 {v0.4s}, [x0], #16 + WORD $0x4cdf2874 // ld1 {v20.4s-v23.4s}, [x3], #64 + + WORD $0x4c407801 // ld1 {v1.4s}, [x0] + WORD $0x4cdf2878 // ld1 {v24.4s-v27.4s}, [x3], #64 + WORD $0xd1004000 // sub x0, x0, #0x10 + WORD $0x4cdf287c // ld1 {v28.4s-v31.4s}, [x3], #64 + +loop: + // Main loop + WORD $0x4cdf2025 // ld1 {v5.16b-v8.16b}, [x1], #64 + WORD $0x4ea01c02 // mov v2.16b, v0.16b + WORD $0x4ea11c23 // mov v3.16b, v1.16b + WORD $0x6e2008a5 // rev32 v5.16b, v5.16b + WORD $0x6e2008c6 // rev32 v6.16b, v6.16b + WORD $0x4eb084a9 // add v9.4s, v5.4s, v16.4s + WORD $0x6e2008e7 // rev32 v7.16b, v7.16b + WORD $0x4eb184ca // add v10.4s, v6.4s, v17.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x6e200908 // rev32 v8.16b, v8.16b + WORD $0x4eb284e9 // add v9.4s, v7.4s, v18.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb3850a // add v10.4s, v8.4s, v19.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb484a9 // add v9.4s, 
v5.4s, v20.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb584ca // add v10.4s, v6.4s, v21.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eb684e9 // add v9.4s, v7.4s, v22.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4eb7850a // add v10.4s, v8.4s, v23.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4eb884a9 // add v9.4s, v5.4s, v24.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4eb984ca // add v10.4s, v6.4s, v25.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e2828c5 // sha256su0 v5.4s, v6.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4eba84e9 // add v9.4s, v7.4s, v26.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828e6 // sha256su0 v6.4s, v7.4s + WORD $0x5e0860e5 // sha256su1 v5.4s, v7.4s, v8.4s + WORD $0x4ebb850a // add v10.4s, v8.4s, v27.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD 
$0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e282907 // sha256su0 v7.4s, v8.4s + WORD $0x5e056106 // sha256su1 v6.4s, v8.4s, v5.4s + WORD $0x4ebc84a9 // add v9.4s, v5.4s, v28.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x5e2828a8 // sha256su0 v8.4s, v5.4s + WORD $0x5e0660a7 // sha256su1 v7.4s, v5.4s, v6.4s + WORD $0x4ebd84ca // add v10.4s, v6.4s, v29.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x5e0760c8 // sha256su1 v8.4s, v6.4s, v7.4s + WORD $0x4ebe84e9 // add v9.4s, v7.4s, v30.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ebf850a // add v10.4s, v8.4s, v31.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e094062 // sha256h q2, q3, v9.4s + WORD $0x5e095083 // sha256h2 q3, q4, v9.4s + WORD $0x4ea21c44 // mov v4.16b, v2.16b + WORD $0x5e0a4062 // sha256h q2, q3, v10.4s + WORD $0x5e0a5083 // sha256h2 q3, q4, v10.4s + WORD $0x4ea38421 // add v1.4s, v1.4s, v3.4s + WORD $0x4ea28400 // add v0.4s, v0.4s, v2.4s + + SUBS $64, R2 + BPL loop + + // Store result + WORD $0x4c00a800 // st1 {v0.4s, v1.4s}, [x0] + +complete: + RET + +// Constants table +DATA ·constants+0x0(SB)/8, $0x71374491428a2f98 +DATA ·constants+0x8(SB)/8, $0xe9b5dba5b5c0fbcf +DATA ·constants+0x10(SB)/8, $0x59f111f13956c25b +DATA ·constants+0x18(SB)/8, $0xab1c5ed5923f82a4 +DATA ·constants+0x20(SB)/8, $0x12835b01d807aa98 +DATA ·constants+0x28(SB)/8, $0x550c7dc3243185be +DATA ·constants+0x30(SB)/8, $0x80deb1fe72be5d74 +DATA ·constants+0x38(SB)/8, $0xc19bf1749bdc06a7 +DATA ·constants+0x40(SB)/8, $0xefbe4786e49b69c1 +DATA ·constants+0x48(SB)/8, $0x240ca1cc0fc19dc6 +DATA ·constants+0x50(SB)/8, $0x4a7484aa2de92c6f +DATA ·constants+0x58(SB)/8, $0x76f988da5cb0a9dc +DATA 
·constants+0x60(SB)/8, $0xa831c66d983e5152 +DATA ·constants+0x68(SB)/8, $0xbf597fc7b00327c8 +DATA ·constants+0x70(SB)/8, $0xd5a79147c6e00bf3 +DATA ·constants+0x78(SB)/8, $0x1429296706ca6351 +DATA ·constants+0x80(SB)/8, $0x2e1b213827b70a85 +DATA ·constants+0x88(SB)/8, $0x53380d134d2c6dfc +DATA ·constants+0x90(SB)/8, $0x766a0abb650a7354 +DATA ·constants+0x98(SB)/8, $0x92722c8581c2c92e +DATA ·constants+0xa0(SB)/8, $0xa81a664ba2bfe8a1 +DATA ·constants+0xa8(SB)/8, $0xc76c51a3c24b8b70 +DATA ·constants+0xb0(SB)/8, $0xd6990624d192e819 +DATA ·constants+0xb8(SB)/8, $0x106aa070f40e3585 +DATA ·constants+0xc0(SB)/8, $0x1e376c0819a4c116 +DATA ·constants+0xc8(SB)/8, $0x34b0bcb52748774c +DATA ·constants+0xd0(SB)/8, $0x4ed8aa4a391c0cb3 +DATA ·constants+0xd8(SB)/8, $0x682e6ff35b9cca4f +DATA ·constants+0xe0(SB)/8, $0x78a5636f748f82ee +DATA ·constants+0xe8(SB)/8, $0x8cc7020884c87814 +DATA ·constants+0xf0(SB)/8, $0xa4506ceb90befffa +DATA ·constants+0xf8(SB)/8, $0xc67178f2bef9a3f7 + +GLOBL ·constants(SB), 8, $256 + diff --git a/vendor/github.com/minio/sha256-simd/sha256block_other.go b/vendor/github.com/minio/sha256-simd/sha256block_other.go new file mode 100644 index 000000000..0930d2a2c --- /dev/null +++ b/vendor/github.com/minio/sha256-simd/sha256block_other.go @@ -0,0 +1,25 @@ +//+build noasm appengine ppc64 ppc64le mips mipsle mips64 mips64le s390x wasm + +/* + * Minio Cloud Storage, (C) 2019 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sha256 + +func blockAvx2Go(dig *digest, p []byte) {} +func blockAvxGo(dig *digest, p []byte) {} +func blockSsseGo(dig *digest, p []byte) {} +func blockShaGo(dig *digest, p []byte) {} +func blockArmGo(dig *digest, p []byte) {} diff --git a/vendor/github.com/spacemonkeygo/errors/.travis.yml b/vendor/github.com/spacemonkeygo/errors/.travis.yml new file mode 100644 index 000000000..d2b67f69c --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.7 + - 1.8 + - tip diff --git a/vendor/github.com/spacemonkeygo/errors/LICENSE b/vendor/github.com/spacemonkeygo/errors/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. 
+ +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/errors/README.md b/vendor/github.com/spacemonkeygo/errors/README.md new file mode 100644 index 000000000..c5eed141e --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/README.md @@ -0,0 +1,19 @@ +# errors [![Build Status](https://api.travis-ci.org/spacemonkeygo/errors.svg?branch=master)](https://travis-ci.org/spacemonkeygo/errors) + +Please see http://godoc.org/github.com/spacemonkeygo/errors for info + +### License + +Copyright (C) 2014 Space Monkey, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/errors/config.go b/vendor/github.com/spacemonkeygo/errors/config.go new file mode 100644 index 000000000..3a2f2ee03 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/config.go @@ -0,0 +1,24 @@ +// Copyright (C) 2014 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +// Config is a configuration struct meant to be used with +// github.com/spacemonkeygo/flagfile/utils.Setup +// but can be set independently. +var Config = struct { + Stacklogsize int `default:"4096" usage:"the max stack trace byte length to log"` +}{ + Stacklogsize: 4096, +} diff --git a/vendor/github.com/spacemonkeygo/errors/ctx17.go b/vendor/github.com/spacemonkeygo/errors/ctx17.go new file mode 100644 index 000000000..6d7229901 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/ctx17.go @@ -0,0 +1,30 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +// TODO: remove this build restriction once appengine supports 1.7 + +// +build !appengine + +package errors + +import ( + "context" +) + +var ( + contextCanceled = context.Canceled + contextDeadlineExceeded = context.DeadlineExceeded +) diff --git a/vendor/github.com/spacemonkeygo/errors/data_keys.go b/vendor/github.com/spacemonkeygo/errors/data_keys.go new file mode 100644 index 000000000..d6189bd60 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/data_keys.go @@ -0,0 +1,32 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "sync/atomic" +) + +var ( + lastId int32 = 0 +) + +// DataKey's job is to make sure that keys in each error instances namespace +// are lexically scoped, thus helping developers not step on each others' toes +// between large packages. You can only store data on an error using a DataKey, +// and you can only make DataKeys with GenSym(). +type DataKey struct{ id int32 } + +// GenSym generates a brand new, never-before-seen DataKey +func GenSym() DataKey { return DataKey{id: atomic.AddInt32(&lastId, 1)} } diff --git a/vendor/github.com/spacemonkeygo/errors/doc.go b/vendor/github.com/spacemonkeygo/errors/doc.go new file mode 100644 index 000000000..2fccb4b57 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/doc.go @@ -0,0 +1,205 @@ +// Copyright (C) 2014 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors is a flexible error support library for Go + +Motivation + +Go's standard library is intentionally sparse on providing error utilities, and +developers coming from other programming languages may miss some features they +took for granted [1]. This package is an attempt at providing those features in +an idiomatic Go way. + +The main features this package provides (in addition to miscellaneous +utilities) are: + + * Error hierarchies + * Stack traces + * Arbitrary error values + +Error hierarchies + +While Go has very deliberately not implemented class hierarchies, a quick +perusal of Go's net and os packages should indicate that sometimes error +hierarchies are useful. Go programmers should be familiar with the net.Error +interface (and the types that fulfill it) as well as the os helper functions +such as os.IsNotExist, os.IsPermission, etc. + +Unfortunately, to implement something similar, a developer will have to +implement a struct that matches the error interface as well as any testing +methods or any more detailed interfaces they may choose to export. It's not +hard, but it is friction, and developers tend to use fmt.Errorf instead due +to ease of use, thus missing out on useful features that functions like +os.IsNotExist and friends provide. + +The errors package provides reusable components for building similar +features while reducing friction as much as possible. 
With the errors package, +the os error handling routines can be mimicked as follows: + + package osmimic + + import ( + "github.com/spacemonkeygo/errors" + ) + + var ( + OSError = errors.NewClass("OS Error") + NotExist = OSError.NewClass("Not Exist") + ) + + func Open(path string) (*File, error) { + // actually do something here + return nil, NotExist.New("path %#v doesn't exist", path) + } + + func MyMethod() error { + fh, err := Open(mypath) + if err != nil { + if NotExist.Contains(err) { + // file doesn't exist, do stuff + } + return err + } + // do stuff + } + +Stack traces + +It doesn't take long during Go development before you may find yourself +wondering where an error came from. In other languages, as soon as an error is +raised, a stack trace is captured and is displayed as part of the language's +error handling. Go error types are simply basic values and no such magic +happens to tell you what line or what stack an error came from. + +The errors package fixes this by optionally (but by default) capturing the +stack trace as part of your error. This behavior can be turned off and on for +specific error classes and comes in two flavors. You can have the stack trace +be appended to the error's Error() message, or you can have the stack trace +be logged immediately, every time an error of that type is instantiated. + +Every error and error class supports hierarchical settings, in the sense that +if a setting was not explicitly set on that error or error class, setting +resolution traverses the error class hierarchy until it finds a valid setting, +or returns the default. + +See CaptureStack()/NoCaptureStack() and LogOnCreation()/NoLogOnCreation() for +how to control this feature. + +Arbitrary error values + +These hierarchical settings (for whether or not errors captured or logged stack +traces) were so useful, we generalized the system to allow users to extend +the errors system with their own values. 
A user can tag a specific error with +some value given a statically defined key, or tag a whole error class subtree. + +Arbitrary error values can easily handle situtations like net.Error's +Temporary() field, where some errors are temporary and others aren't. This can +be mimicked as follows: + + package netmimic + + import ( + "github.com/spacemonkeygo/errors" + ) + + var ( + NetError = errors.NewClass("Net Error") + OpError = NetError.NewClass("Op Error") + + tempErrorKey = errors.GenSym() + ) + + func SetIsTemporary() errors.ErrorOption { + return errors.SetData(tempErrorKey, true) + } + + func IsTemporary(err error) bool { + v, ok := errors.GetData(err, tempErrorKey).(bool) + if !ok { + return false + } + return v + } + + func NetworkOp() error { + // actually do something here + return OpError.NewWith("failed operation", SetIsTemporary()) + } + + func Example() error { + for { + err := NetworkOp() + if err != nil { + if IsTemporary(err) { + // probably should do exponential backoff + continue + } + return err + } + } + } + +HTTP handling + +Another great example of arbitrary error value functionality is the errhttp +subpackage. See the errhttp source for more examples of how to use +SetData/GetData. + +The errhttp package really helped clean up our error code. Take a look to +see if it can help your error handling with HTTP stacks too. + +http://godoc.org/github.com/spacemonkeygo/errors/errhttp + +Exit recording + +So you have stack traces, which tells you how the error was generated, but +perhaps you're interested in keeping track of how the error was handled? + +Every time you call errors.Record(err), it adds the current line information +to the error's output. 
As an example: + + func MyFunction() error { + err := Something() + if err != nil { + if IsTemporary(err) { + // manage the temporary error + return errors.Record(err) + } else { + // manage the permanent error + return errors.Record(err) + } + } + } + +errors.Record will help you keep track of which error handling branch your +code took. + +ErrorGroup + +There's a few different types of ErrorGroup utilities in this package, but they +all work the same way. Make sure to check out the ErrorGroup example. + +CatchPanic + +CatchPanic helps you easily manage functions that you think might panic, and +instead return errors. CatchPanic works by taking a pointer to your named error +return value. Check out the CatchPanic example for more. + +Footnotes + +[1] This errors package started while porting a large Python codebase to Go. +https://www.spacemonkey.com/blog/posts/go-space-monkey +*/ +package errors diff --git a/vendor/github.com/spacemonkeygo/errors/errors.go b/vendor/github.com/spacemonkeygo/errors/errors.go new file mode 100644 index 000000000..7d6d7eeb0 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/errors.go @@ -0,0 +1,648 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime" + "strings" +) + +var ( + logOnCreation = GenSym() + captureStack = GenSym() + disableInheritance = GenSym() +) + +// ErrorClass is the basic hierarchical error type. An ErrorClass generates +// actual errors, but the error class controls properties of the errors it +// generates, such as where those errors are in the hierarchy, whether or not +// they capture the stack on instantiation, and so forth. +type ErrorClass struct { + parent *ErrorClass + name string + data map[DataKey]interface{} +} + +var ( + // HierarchicalError is the base class for all hierarchical errors generated + // through this class. + HierarchicalError = &ErrorClass{ + parent: nil, + name: "Error", + data: map[DataKey]interface{}{captureStack: true}} + + // SystemError is the base error class for errors not generated through this + // errors library. It is not expected that anyone would ever generate new + // errors from a SystemError type or make subclasses. + SystemError = &ErrorClass{ + parent: nil, + name: "System Error", + data: map[DataKey]interface{}{}} +) + +// An ErrorOption is something that controls behavior of specific error +// instances. They can be set on ErrorClasses or errors individually. +type ErrorOption func(map[DataKey]interface{}) + +// SetData will take the given value and store it with the error or error class +// and its descendents associated with the given DataKey. Be sure to check out +// the example. value can be nil to disable values for subhierarchies. +func SetData(key DataKey, value interface{}) ErrorOption { + return func(m map[DataKey]interface{}) { + m[key] = value + } +} + +// LogOnCreation tells the error class and its descendents to log the stack +// whenever an error of this class is created. 
+func LogOnCreation() ErrorOption { + return SetData(logOnCreation, true) +} + +// CaptureStack tells the error class and its descendents to capture the stack +// whenever an error of this class is created, and output it as part of the +// error's Error() method. This is the default. +func CaptureStack() ErrorOption { + return SetData(captureStack, true) +} + +// NoLogOnCreation is the opposite of LogOnCreation and applies to the error, +// class, and its descendents. This is the default. +func NoLogOnCreation() ErrorOption { + return SetData(logOnCreation, false) +} + +// NoCaptureStack is the opposite of CaptureStack and applies to the error, +// class, and its descendents. +func NoCaptureStack() ErrorOption { + return SetData(captureStack, false) +} + +// If DisableInheritance is provided, the error or error class will belong to +// its ancestors, but will not inherit their settings and options. Use with +// caution, and may disappear in future releases. +func DisableInheritance() ErrorOption { + return SetData(disableInheritance, true) +} + +func boolWrapper(val interface{}, default_value bool) bool { + rv, ok := val.(bool) + if ok { + return rv + } + return default_value +} + +// NewClass creates an error class with the provided name and options. Classes +// generated from this method and not *ErrorClass.NewClass will descend from +// the root HierarchicalError base class. +func NewClass(name string, options ...ErrorOption) *ErrorClass { + return HierarchicalError.NewClass(name, options...) +} + +// New is for compatibility with the default Go errors package. It simply +// creates an error from the HierarchicalError root class. +func New(text string) error { + // NewWith doesn't take a format string, even though we have no options. + return HierarchicalError.NewWith(text) +} + +// NewClass creates an error class with the provided name and options. The new +// class will descend from the receiver. 
+func (parent *ErrorClass) NewClass(name string, + options ...ErrorOption) *ErrorClass { + + ec := &ErrorClass{ + parent: parent, + name: name, + data: make(map[DataKey]interface{})} + + for _, option := range options { + option(ec.data) + } + + if !boolWrapper(ec.data[disableInheritance], false) { + // hoist options for speed + for key, val := range parent.data { + _, exists := ec.data[key] + if !exists { + ec.data[key] = val + } + } + return ec + } else { + delete(ec.data, disableInheritance) + } + + return ec +} + +// MustAddData allows adding data key value pairs to error classes after they +// are created. This is useful for allowing external packages add namespaced +// values to errors defined outside of their package. It will panic if the +// key is already set in the error class. +func (e *ErrorClass) MustAddData(key DataKey, value interface{}) { + if _, ex := e.data[key]; ex { + panic("key already exists") + } + e.data[key] = value +} + +// GetData will return any data set on the error class for the given key. It +// returns nil if there is no data set for that key. +func (e *ErrorClass) GetData(key DataKey) interface{} { + return e.data[key] +} + +// Parent returns this error class' direct ancestor. +func (e *ErrorClass) Parent() *ErrorClass { + return e.parent +} + +// String returns this error class' name +func (e *ErrorClass) String() string { + if e == nil { + return "nil" + } + return e.name +} + +// Is returns true if the receiver class is or is a descendent of parent. +func (e *ErrorClass) Is(parent *ErrorClass) bool { + for check := e; check != nil; check = check.parent { + if check == parent { + return true + } + } + return false +} + +// frame logs the pc at some point during execution. +type frame struct { + pc uintptr +} + +// String returns a human readable form of the frame. 
+func (e frame) String() string { + if e.pc == 0 { + return "unknown.unknown:0" + } + f := runtime.FuncForPC(e.pc) + if f == nil { + return "unknown.unknown:0" + } + file, line := f.FileLine(e.pc) + return fmt.Sprintf("%s:%s:%d", f.Name(), filepath.Base(file), line) +} + +// callerState records the pc into an frame for two callers up. +func callerState(depth int) frame { + pc, _, _, ok := runtime.Caller(depth) + if !ok { + return frame{pc: 0} + } + return frame{pc: pc} +} + +// record will record the pc at the given depth into the error if it is +// capable of recording it. +func record(err error, depth int) error { + if err == nil { + return nil + } + cast, ok := err.(*Error) + if !ok { + return err + } + cast.exits = append(cast.exits, callerState(depth)) + return cast +} + +// Record will record the current pc on the given error if possible, adding +// to the error's recorded exits list. Returns the given error argument. +func Record(err error) error { + return record(err, 3) +} + +// RecordBefore will record the pc depth frames above the current stack frame +// on the given error if possible, adding to the error's recorded exits list. +// Record(err) is equivalent to RecordBefore(err, 0). Returns the given error +// argument. +func RecordBefore(err error, depth int) error { + return record(err, 3+depth) +} + +// Error is the type that represents a specific error instance. It is not +// expected that you will work with *Error classes directly. Instead, you +// should use the 'error' interface and errors package methods that operate +// on errors instances. +type Error struct { + err error + class *ErrorClass + stacks [][]frame + exits []frame + data map[DataKey]interface{} +} + +// GetData returns the value associated with the given DataKey on this error +// or any of its ancestors. 
Please see the example for SetData +func (e *Error) GetData(key DataKey) interface{} { + if e.data != nil { + val, ok := e.data[key] + if ok { + return val + } + if boolWrapper(e.data[disableInheritance], false) { + return nil + } + } + return e.class.data[key] +} + +// GetData returns the value associated with the given DataKey on this error +// or any of its ancestors. Please see the example for SetData +func GetData(err error, key DataKey) interface{} { + cast, ok := err.(*Error) + if ok { + return cast.GetData(key) + } + return nil +} + +func (e *ErrorClass) wrap(err error, classes []*ErrorClass, + options []ErrorOption) error { + if err == nil { + return nil + } + if ec, ok := err.(*Error); ok { + if ec.Is(e) { + if len(options) == 0 { + return ec + } + // if we have options, we have to wrap it cause we don't want to + // mutate the existing error. + } else { + for _, class := range classes { + if ec.Is(class) { + return err + } + } + } + } + + rv := &Error{err: err, class: e} + if len(options) > 0 { + rv.data = make(map[DataKey]interface{}) + for _, option := range options { + option(rv.data) + } + } + + if boolWrapper(rv.GetData(captureStack), false) { + rv.stacks = [][]frame{getStack(3)} + } + if boolWrapper(rv.GetData(logOnCreation), false) { + LogWithStack(rv.Error()) + } + return rv +} + +func getStack(depth int) (stack []frame) { + var pcs [256]uintptr + amount := runtime.Callers(depth+1, pcs[:]) + stack = make([]frame, amount) + for i := 0; i < amount; i++ { + stack[i] = frame{pcs[i]} + } + return stack +} + +// AttachStack adds another stack to the current error's stack trace if it +// exists +func AttachStack(err error) { + if err == nil { + return + } + cast, ok := err.(*Error) + if !ok { + return + } + if len(cast.stacks) < 1 { + // only record stacks if this error was supposed to + return + } + cast.stacks = append(cast.stacks, getStack(2)) +} + +// WrapUnless wraps the given error in the receiver error class unless the +// error is already an 
instance of one of the provided error classes. +func (e *ErrorClass) WrapUnless(err error, classes ...*ErrorClass) error { + return e.wrap(err, classes, nil) +} + +// Wrap wraps the given error in the receiver error class with the provided +// error-specific options. +func (e *ErrorClass) Wrap(err error, options ...ErrorOption) error { + return e.wrap(err, nil, options) +} + +// New makes a new error type. It takes a format string. +func (e *ErrorClass) New(format string, args ...interface{}) error { + return e.wrap(fmt.Errorf(format, args...), nil, nil) +} + +// NewWith makes a new error type with the provided error-specific options. +func (e *ErrorClass) NewWith(message string, options ...ErrorOption) error { + return e.wrap(errors.New(message), nil, options) +} + +// Error conforms to the error interface. Error will return the backtrace if +// it was captured and any recorded exits. +func (e *Error) Error() string { + message := strings.TrimRight(e.err.Error(), "\n ") + if strings.Contains(message, "\n") { + message = fmt.Sprintf("%s:\n %s", e.class.String(), + strings.Replace(message, "\n", "\n ", -1)) + } else { + message = fmt.Sprintf("%s: %s", e.class.String(), message) + } + if stack := e.Stack(); stack != "" { + message = fmt.Sprintf( + "%s\n\"%s\" backtrace:\n%s", message, e.class, stack) + } + if exits := e.Exits(); exits != "" { + message = fmt.Sprintf( + "%s\n\"%s\" exits:\n%s", message, e.class, exits) + } + return message +} + +// Message returns just the error message without the backtrace or exits. +func (e *Error) Message() string { + message := strings.TrimRight(GetMessage(e.err), "\n ") + if strings.Contains(message, "\n") { + return fmt.Sprintf("%s:\n %s", e.class.String(), + strings.Replace(message, "\n", "\n ", -1)) + } + return fmt.Sprintf("%s: %s", e.class.String(), message) +} + +// WrappedErr returns the wrapped error, if the current error is simply +// wrapping some previously returned error or system error. 
You probably want +// the package-level WrappedErr +func (e *Error) WrappedErr() error { + return e.err +} + +// WrappedErr returns the wrapped error, if the current error is simply +// wrapping some previously returned error or system error. If the error isn't +// hierarchical it is just returned. +func WrappedErr(err error) error { + cast, ok := err.(*Error) + if !ok { + return err + } + return cast.WrappedErr() +} + +// Class will return the appropriate error class for the given error. You +// probably want the package-level GetClass. +func (e *Error) Class() *ErrorClass { + return e.class +} + +// Name returns the name of the error: in this case the name of the class the +// error belongs to. +func (e *Error) Name() (string, bool) { + return e.class.name, true +} + +// GetClass will return the appropriate error class for the given error. +// If the error is not nil, GetClass always returns a hierarchical error class, +// and even attempts to determine a class for common system error types. +func GetClass(err error) *ErrorClass { + if err == nil { + return nil + } + cast, ok := err.(*Error) + if !ok { + return findSystemErrorClass(err) + } + return cast.class +} + +// Stack will return the stack associated with the error if one is found. You +// probably want the package-level GetStack. +func (e *Error) Stack() string { + if len(e.stacks) > 0 { + var frames []string + for _, stack := range e.stacks { + if frames == nil { + frames = make([]string, 0, len(stack)) + } else { + frames = append(frames, "----- attached stack -----") + } + for _, f := range stack { + frames = append(frames, f.String()) + } + } + return strings.Join(frames, "\n") + } + return "" +} + +// GetStack will return the stack associated with the error if one is found. +func GetStack(err error) string { + if err == nil { + return "" + } + cast, ok := err.(*Error) + if !ok { + return "" + } + return cast.Stack() +} + +// Exits will return the exits recorded on the error if any are found. 
You +// probably want the package-level GetExits. +func (e *Error) Exits() string { + if len(e.exits) > 0 { + exits := make([]string, len(e.exits)) + for i, ex := range e.exits { + exits[i] = ex.String() + } + return strings.Join(exits, "\n") + } + return "" +} + +// GetExits will return the exits recorded on the error if any are found. +func GetExits(err error) string { + if err == nil { + return "" + } + cast, ok := err.(*Error) + if !ok { + return "" + } + return cast.Exits() +} + +// GetMessage returns just the error message without the backtrace or exits. +func GetMessage(err error) string { + if err == nil { + return "" + } + cast, ok := err.(*Error) + if !ok { + return err.Error() + } + return cast.Message() +} + +// EquivalenceOption values control behavior of determining whether or not an +// error belongs to a specific class. +type EquivalenceOption int + +const ( + // If IncludeWrapped is used, wrapped errors are also used for determining + // class membership. + IncludeWrapped EquivalenceOption = 1 +) + +func combineEquivOpts(opts []EquivalenceOption) (rv EquivalenceOption) { + for _, opt := range opts { + rv |= opt + } + return rv +} + +// Is returns whether or not an error belongs to a specific class. Typically +// you should use Contains instead. +func (e *Error) Is(ec *ErrorClass, opts ...EquivalenceOption) bool { + return ec.Contains(e, opts...) +} + +// Contains returns whether or not the receiver error class contains the given +// error instance. +func (e *ErrorClass) Contains(err error, opts ...EquivalenceOption) bool { + if err == nil { + return false + } + cast, ok := err.(*Error) + if !ok { + return findSystemErrorClass(err).Is(e) + } + if cast.class.Is(e) { + return true + } + if combineEquivOpts(opts)&IncludeWrapped == 0 { + return false + } + return e.Contains(cast.err, opts...) 
+} + +var ( + // Useful error classes + NotImplementedError = NewClass("Not Implemented Error", LogOnCreation()) + ProgrammerError = NewClass("Programmer Error", LogOnCreation()) + PanicError = NewClass("Panic Error", LogOnCreation()) + + // The following SystemError descendants are provided such that the GetClass + // method has something to return for standard library error types not + // defined through this class. + // + // It is not expected that anyone would create instances of these classes. + // + // from os + SyscallError = SystemError.NewClass("Syscall Error") + // from syscall + ErrnoError = SystemError.NewClass("Errno Error") + // from net + NetworkError = SystemError.NewClass("Network Error") + UnknownNetworkError = NetworkError.NewClass("Unknown Network Error") + AddrError = NetworkError.NewClass("Addr Error") + InvalidAddrError = AddrError.NewClass("Invalid Addr Error") + NetOpError = NetworkError.NewClass("Network Op Error") + NetParseError = NetworkError.NewClass("Network Parse Error") + DNSError = NetworkError.NewClass("DNS Error") + DNSConfigError = DNSError.NewClass("DNS Config Error") + // from io + IOError = SystemError.NewClass("IO Error") + EOF = IOError.NewClass("EOF") + ClosedPipeError = IOError.NewClass("Closed Pipe Error") + NoProgressError = IOError.NewClass("No Progress Error") + ShortBufferError = IOError.NewClass("Short Buffer Error") + ShortWriteError = IOError.NewClass("Short Write Error") + UnexpectedEOFError = IOError.NewClass("Unexpected EOF Error") + // from context + ContextError = SystemError.NewClass("Context Error") + ContextCanceled = ContextError.NewClass("Canceled") + ContextTimeout = ContextError.NewClass("Timeout") +) + +func findSystemErrorClass(err error) *ErrorClass { + switch err { + case io.EOF: + return EOF + case io.ErrUnexpectedEOF: + return UnexpectedEOFError + case io.ErrClosedPipe: + return ClosedPipeError + case io.ErrNoProgress: + return NoProgressError + case io.ErrShortBuffer: + return ShortBufferError + 
case io.ErrShortWrite: + return ShortWriteError + case contextCanceled: + return ContextCanceled + case contextDeadlineExceeded: + return ContextTimeout + default: + break + } + if isErrnoError(err) { + return ErrnoError + } + switch err.(type) { + case *os.SyscallError: + return SyscallError + case net.UnknownNetworkError: + return UnknownNetworkError + case *net.AddrError: + return AddrError + case net.InvalidAddrError: + return InvalidAddrError + case *net.OpError: + return NetOpError + case *net.ParseError: + return NetParseError + case *net.DNSError: + return DNSError + case *net.DNSConfigError: + return DNSConfigError + case net.Error: + return NetworkError + default: + return SystemError + } +} diff --git a/vendor/github.com/spacemonkeygo/errors/syscall.go b/vendor/github.com/spacemonkeygo/errors/syscall.go new file mode 100644 index 000000000..0607fd235 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/syscall.go @@ -0,0 +1,26 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !appengine + +package errors + +import ( + "syscall" +) + +func isErrnoError(err error) bool { + _, ok := err.(syscall.Errno) + return ok +} diff --git a/vendor/github.com/spacemonkeygo/errors/syscall_ae.go b/vendor/github.com/spacemonkeygo/errors/syscall_ae.go new file mode 100644 index 000000000..6f6a64479 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/syscall_ae.go @@ -0,0 +1,21 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package errors + +func isErrnoError(err error) bool { + return false +} diff --git a/vendor/github.com/spacemonkeygo/errors/utils.go b/vendor/github.com/spacemonkeygo/errors/utils.go new file mode 100644 index 000000000..add827278 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/utils.go @@ -0,0 +1,155 @@ +// Copyright (C) 2014 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "fmt" + "log" + "runtime" + "strings" +) + +var ( + // Change this method if you want errors to log somehow else + LogMethod = log.Printf + + ErrorGroupError = NewClass("Error Group Error") +) + +// LogWithStack will log the given messages with the current stack +func LogWithStack(messages ...interface{}) { + buf := make([]byte, Config.Stacklogsize) + buf = buf[:runtime.Stack(buf, false)] + LogMethod("%s\n%s", fmt.Sprintln(messages...), buf) +} + +// CatchPanic can be used to catch panics and turn them into errors. See the +// example. +func CatchPanic(err_ref *error) { + r := recover() + if r == nil { + return + } + err, ok := r.(error) + if ok { + *err_ref = PanicError.Wrap(err) + return + } + *err_ref = PanicError.New("%v", r) +} + +// ErrorGroup is a type for collecting errors from a bunch of independent +// tasks. ErrorGroups are not threadsafe. See the example for usage. +type ErrorGroup struct { + Errors []error + limit int + excess int +} + +// NewErrorGroup makes a new ErrorGroup +func NewErrorGroup() *ErrorGroup { return &ErrorGroup{} } + +// NewBoundedErrorGroup makes a new ErrorGroup that will not track more than +// limit errors. Once the limit is reached, the ErrorGroup will track +// additional errors as excess. +func NewBoundedErrorGroup(limit int) *ErrorGroup { + return &ErrorGroup{ + limit: limit, + } +} + +// Add is called with errors. nil errors are ignored. +func (e *ErrorGroup) Add(err error) { + if err == nil { + return + } + if e.limit > 0 && len(e.Errors) == e.limit { + e.excess++ + } else { + e.Errors = append(e.Errors, err) + } +} + +// Finalize will collate all the found errors. If no errors were found, it will +// return nil. If one error was found, it will be returned directly. Otherwise +// an ErrorGroupError will be returned. 
+func (e *ErrorGroup) Finalize() error { + if len(e.Errors) == 0 { + return nil + } + if len(e.Errors) == 1 && e.excess == 0 { + return e.Errors[0] + } + msgs := make([]string, 0, len(e.Errors)) + for _, err := range e.Errors { + msgs = append(msgs, err.Error()) + } + if e.excess > 0 { + msgs = append(msgs, fmt.Sprintf("... and %d more.", e.excess)) + e.excess = 0 + } + e.Errors = nil + return ErrorGroupError.New(strings.Join(msgs, "\n")) +} + +// LoggingErrorGroup is similar to ErrorGroup except that instead of collecting +// all of the errors, it logs the errors immediately and just counts how many +// non-nil errors have been seen. See the ErrorGroup example for usage. +type LoggingErrorGroup struct { + name string + total int + failed int +} + +// NewLoggingErrorGroup returns a new LoggingErrorGroup with the given name. +func NewLoggingErrorGroup(name string) *LoggingErrorGroup { + return &LoggingErrorGroup{name: name} +} + +// Add will handle a given error. If the error is non-nil, total and failed +// are both incremented and the error is logged. If the error is nil, only +// total is incremented. +func (e *LoggingErrorGroup) Add(err error) { + e.total++ + if err != nil { + LogMethod("%s: %s", e.name, err) + e.failed++ + } +} + +// Finalize returns no error if no failures were observed, otherwise it will +// return an ErrorGroupError with statistics about the observed errors. 
+func (e *LoggingErrorGroup) Finalize() (err error) { + if e.failed > 0 { + err = ErrorGroupError.New("%s: %d of %d failed.", e.name, e.failed, + e.total) + } + e.total = 0 + e.failed = 0 + return err +} + +type Finalizer interface { + Finalize() error +} + +// Finalize takes a group of ErrorGroups and joins them together into one error +func Finalize(finalizers ...Finalizer) error { + var errs ErrorGroup + for _, finalizer := range finalizers { + errs.Add(finalizer.Finalize()) + } + return errs.Finalize() +} diff --git a/vendor/github.com/spacemonkeygo/errors/xctx.go b/vendor/github.com/spacemonkeygo/errors/xctx.go new file mode 100644 index 000000000..715fe5027 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/errors/xctx.go @@ -0,0 +1,26 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.7 + +package errors + +import ( + "golang.org/x/net/context" +) + +var ( + contextCanceled = context.Canceled + contextDeadlineExceeded = context.DeadlineExceeded +) diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/AUTHORS b/vendor/github.com/spacemonkeygo/monkit/v3/AUTHORS new file mode 100644 index 000000000..10f2e1141 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/AUTHORS @@ -0,0 +1 @@ +Space Monkey, Inc. 
diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/LICENSE b/vendor/github.com/spacemonkeygo/monkit/v3/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/README.md b/vendor/github.com/spacemonkeygo/monkit/v3/README.md new file mode 100644 index 000000000..c03405543 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/README.md @@ -0,0 +1,637 @@ +# ![monkit](https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/logo.png) + +Package monkit is a flexible code instrumenting and data collection library. + +See documentation at https://godoc.org/gopkg.in/spacemonkeygo/monkit.v3 + +Software is hard. Like, really hard. +[Just the worst](http://www.stilldrinking.org/programming-sucks). Sometimes it +feels like we've constructed a field where the whole point is to see how +tangled we can get ourselves before seeing if we can get tangled up more while +trying to get untangled. 
+ +Many software engineering teams are coming to realize (some slower than others) +that collecting data over time about how their systems are functioning is a +super power you can't turn back from. Some teams are calling this +[Telemetry](http://techblog.netflix.com/2014/12/introducing-atlas-netflixs-primary.html), +[Observability](https://blog.twitter.com/2013/observability-at-twitter), or +describing it more basically through subcomponents such as +[distributed tracing](http://research.google.com/pubs/pub36356.html), +[time-series data](https://influxdata.com/), or even just +[metrics](http://metrics.dropwizard.io/). We've been calling it monitoring, but +geez, I suppose if trends continue and you want to do this yourself your first +step should be to open a thesaurus and pick an unused term. + +I'm not here to tell you about our whole platform. Instead, I'm here to +explain a redesign of a Go library for instrumenting your Go programs that we +rather quietly launched a few years ago. If you are already using version 1 of +our [old library](https://github.com/spacemonkeygo/monitor), we're sorry, but +we rewrote it from scratch and renamed it to monkit. This one (this one!) is +better - you should switch! + +I'm going to try and sell you as fast as I can on this library. 
+ +## Example usage + +```go +package main + +import ( + "context" + "fmt" + "log" + "net/http" + + "gopkg.in/spacemonkeygo/monkit.v3" + "gopkg.in/spacemonkeygo/monkit.v3/environment" + "gopkg.in/spacemonkeygo/monkit.v3/present" +) + +var ( + mon = monkit.Package() +) + +func ComputeThing(ctx context.Context, arg1, arg2 int) (res int, err error) { + defer mon.Task()(&ctx)(&err) + + timer := mon.Timer("subcomputation").Start() + res = arg1 + arg2 + timer.Stop() + + if res == 3 { + mon.Event("hit 3") + } + + mon.BoolVal("was-4").Observe(res == 4) + mon.IntVal("res").Observe(int64(res)) + mon.Counter("calls").Inc(1) + mon.Gauge("arg1", func() float64 { return float64(arg1) }) + mon.Meter("arg2").Mark(arg2) + + return arg1 + arg2, nil +} + +func DoStuff(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + + result, err := ComputeThing(ctx, 1, 2) + if err != nil { + return err + } + + fmt.Println(result) + return +} + +func main() { + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) + log.Println(DoStuff(context.Background())) +} +``` + +## Metrics + +We've got tools that capture distribution information (including quantiles) +about int64, float64, and bool types. We have tools that capture data about +events (we've got meters for deltas, rates, etc). We have rich tools for +capturing information about tasks and functions, and literally anything that +can generate a name and a number. + +Almost just as importantly, the amount of boilerplate and code you have to +write to get these features is very minimal. Data that's hard to measure +probably won't get measured. + +This data can be collected and sent to [Graphite](http://graphite.wikidot.com/) +or any other time-series database. 
+ +Here's a selection of live stats from one of our storage nodes: + +``` +env.os.fds 120.000000 +env.os.proc.stat.Minflt 81155.000000 +env.os.proc.stat.Cminflt 11789.000000 +env.os.proc.stat.Majflt 10.000000 +env.os.proc.stat.Cmajflt 6.000000 +... + +env.process.control 1.000000 +env.process.crc 3819014369.000000 +env.process.uptime 163225.292925 +env.runtime.goroutines 52.000000 +env.runtime.memory.Alloc 2414080.000000 +... + +env.rusage.Maxrss 26372.000000 +... + +sm/flud/csl/client.(*CSLClient).Verify.current 0.000000 +sm/flud/csl/client.(*CSLClient).Verify.success 788.000000 +sm/flud/csl/client.(*CSLClient).Verify.error volume missing 91.000000 +sm/flud/csl/client.(*CSLClient).Verify.error dial error 1.000000 +sm/flud/csl/client.(*CSLClient).Verify.panics 0.000000 +sm/flud/csl/client.(*CSLClient).Verify.success times min 0.102214 +sm/flud/csl/client.(*CSLClient).Verify.success times avg 1.899133 +sm/flud/csl/client.(*CSLClient).Verify.success times max 8.601230 +sm/flud/csl/client.(*CSLClient).Verify.success times recent 2.673128 +sm/flud/csl/client.(*CSLClient).Verify.failure times min 0.682881 +sm/flud/csl/client.(*CSLClient).Verify.failure times avg 3.936571 +sm/flud/csl/client.(*CSLClient).Verify.failure times max 6.102318 +sm/flud/csl/client.(*CSLClient).Verify.failure times recent 2.208020 +sm/flud/csl/server.store.avg 710800.000000 +sm/flud/csl/server.store.count 271.000000 +sm/flud/csl/server.store.max 3354194.000000 +sm/flud/csl/server.store.min 467.000000 +sm/flud/csl/server.store.recent 1661376.000000 +sm/flud/csl/server.store.sum 192626890.000000 +... +``` + +## Call graphs + +This library generates call graphs of your live process for you. + +These call graphs aren't created through sampling. They're full pictures of all +of the interesting functions you've annotated, along with quantile information +about their successes, failures, how often they panic, return an error (if so +instrumented), how many are currently running, etc. 
+ +The data can be returned in dot format, in json, in text, and can be about +just the functions that are currently executing, or all the functions the +monitoring system has ever seen. + +Here's another example of one of our production nodes: + +![callgraph](https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/callgraph2.png) + +## Trace graphs + +This library generates trace graphs of your live process for you directly, +without requiring standing up some tracing system such as Zipkin (though you +can do that too). + +Inspired by [Google's Dapper](http://research.google.com/pubs/pub36356.html) +and [Twitter's Zipkin](http://zipkin.io), we have process-internal trace +graphs, triggerable by a number of different methods. + +You get this trace information for free whenever you use +[Go contexts](https://blog.golang.org/context) and function monitoring. The +output formats are svg and json. + +Additionally, the library supports trace observation plugins, and we've written +[a plugin that sends this data to Zipkin](http://github.com/spacemonkeygo/monkit-zipkin). + +![trace](https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/trace.png) + +## History + +Before our crazy +[Go rewrite of everything](https://www.spacemonkey.com/blog/posts/go-space-monkey) +(and before we had even seen Google's Dapper paper), we were a Python shop, and +all of our "interesting" functions were decorated with a helper that collected +timing information and sent it to Graphite. + +When we transliterated to Go, we wanted to preserve that functionality, so the +first version of our monitoring package was born. + +Over time it started to get janky, especially as we found Zipkin and started +adding tracing functionality to it. We rewrote all of our Go code to use Google +contexts, and then realized we could get call graph information. We decided a +refactor and then an all-out rethinking of our monitoring package was best, +and so now we have this library. 
+ +## Aside about contexts + +Sometimes you really want callstack contextual information without having to +pass arguments through everything on the call stack. In other languages, many +people implement this with thread-local storage. + +Example: let's say you have written a big system that responds to user +requests. All of your libraries log using your log library. During initial +development everything is easy to debug, since there's low user load, but now +you've scaled and there's OVER TEN USERS and it's kind of hard to tell what log +lines were caused by what. Wouldn't it be nice to add request ids to all of the +log lines kicked off by that request? Then you could grep for all log lines +caused by a specific request id. Geez, it would suck to have to pass all +contextual debugging information through all of your callsites. + +Google solved this problem by always passing a `context.Context` interface +through from call to call. A `Context` is basically just a mapping of arbitrary +keys to arbitrary values that users can add new values for. This way if you +decide to add a request context, you can add it to your `Context` and then all +callsites that descend from that place will have the new data in their contexts. + +It is admittedly very verbose to add contexts to every function call. +Painfully so. I hope to write more about it in the future, but [Google also +wrote up their thoughts about it](https://blog.golang.org/context), which you +can go read. For now, just swallow your disgust and let's keep moving. + +## Motivating program + +Let's make a super simple [Varnish](https://www.varnish-cache.org/) clone. +Open up gedit! (Okay just kidding, open whatever text editor you want.) + +For this motivating program, we won't even add the caching, though there's +comments for where to add it if you'd like. For now, let's just make a +barebones system that will proxy HTTP requests. We'll call it VLite, but +maybe we should call it VReallyLite. 
+ +```go +package main + +import ( + "flag" + "net/http" + "net/http/httputil" + "net/url" +) + +type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy +} + +func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } +} + +func (v *VLite) Proxy(w http.ResponseWriter, r *http.Request) { + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) +} + +func (v *VLite) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // here's where you'd put caching logic + v.Proxy(w, r) +} + +func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + panic(http.ListenAndServe(":8080", NewVLite(targetURL))) +} +``` + +Run and build this and open `localhost:8080` in your browser. If you use the +default proxy target, it should inform you that the world hasn't been +destroyed yet. + +## Adding basic instrumentation + +The first thing you'll want to do is add the small amount of boilerplate to +make the instrumentation we're going to add to your process observable later. + +Import the basic monkit packages: + +```go +"gopkg.in/spacemonkeygo/monkit.v3" +"gopkg.in/spacemonkeygo/monkit.v3/environment" +"gopkg.in/spacemonkeygo/monkit.v3/present" +``` + +and then register environmental statistics and kick off a goroutine in your +main method to serve debug requests: + +```go +environment.Register(monkit.Default) +go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) +``` + +Rebuild, and then check out `localhost:9000/stats` (or +`localhost:9000/stats/json`, if you prefer) in your browser! + +## Request contexts + +Remember what I said about [Google's contexts](https://blog.golang.org/context)? +It might seem a bit overkill for such a small project, but it's time to add +them. 
+ +To help out here, I've created a library that constructs contexts for you +for incoming HTTP requests. Nothing that's about to happen requires my +[webhelp library](https://godoc.org/github.com/jtolds/webhelp), but here is the +code now refactored to receive and pass contexts through our two per-request +calls. + +```go +package main + +import ( + "context" + "flag" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/jtolds/webhelp" + "gopkg.in/spacemonkeygo/monkit.v3" + "gopkg.in/spacemonkeygo/monkit.v3/environment" + "gopkg.in/spacemonkeygo/monkit.v3/present" +) + +type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy +} + +func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } +} + +func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) +} + +func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) error { + // here's where you'd put caching logic + v.Proxy(ctx, w, r) + return nil +} + +func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) + panic(webhelp.ListenAndServe(":8080", NewVLite(targetURL))) +} +``` + +You can create a new context for a request however you want. One reason to use +something like webhelp is that the cancelation feature of Contexts is hooked +up to the HTTP request getting canceled. + +## Monitor some requests + +Let's start to get statistics about how many requests we receive! First, this +package (main) will need to get a monitoring Scope. 
Add this global definition +right after all your imports, much like you'd create a logger with many logging +libraries: + +```go +var mon = monkit.Package() +``` + +Now, make the error return value of HandleHTTP named (so, (err error)), and add +this defer line as the very first instruction of HandleHTTP: + +```go +func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) (err error) { + defer mon.Task()(&ctx)(&err) +``` + +Let's also add the same line (albeit modified for the lack of error) to +Proxy, replacing &err with nil: + +```go +func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + defer mon.Task()(&ctx)(nil) +``` + +You should now have something like: + +```go +package main + +import ( + "context" + "flag" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/jtolds/webhelp" + "gopkg.in/spacemonkeygo/monkit.v3" + "gopkg.in/spacemonkeygo/monkit.v3/environment" + "gopkg.in/spacemonkeygo/monkit.v3/present" +) + +var mon = monkit.Package() + +type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy +} + +func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } +} + +func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + defer mon.Task()(&ctx)(nil) + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) +} + +func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) (err error) { + defer mon.Task()(&ctx)(&err) + // here's where you'd put caching logic + v.Proxy(ctx, w, r) + return nil +} + +func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", 
present.HTTP(monkit.Default)) + panic(webhelp.ListenAndServe(":8080", NewVLite(targetURL))) +} +``` + +We'll unpack what's going on here, but for now: + + * Rebuild and restart! + * Trigger a full refresh at `localhost:8080` to make sure your new HTTP + handler runs + * Visit `localhost:9000/stats` and then `localhost:9000/funcs` + +For this new funcs dataset, if you want a graph, you can download a dot +graph at `localhost:9000/funcs/dot` and json information from +`localhost:9000/funcs/json`. + +You should see something like: + +``` +[3693964236144930897] main.(*VLite).HandleHTTP + parents: entry + current: 0, highwater: 1, success: 2, errors: 0, panics: 0 + success times: + 0.00: 63.930436ms + 0.10: 70.482159ms + 0.25: 80.309745ms + 0.50: 96.689054ms + 0.75: 113.068363ms + 0.90: 122.895948ms + 0.95: 126.17181ms + 1.00: 129.447675ms + avg: 96.689055ms + failure times: + 0.00: 0 + 0.10: 0 + 0.25: 0 + 0.50: 0 + 0.75: 0 + 0.90: 0 + 0.95: 0 + 1.00: 0 + avg: 0 +``` + +with a similar report for the Proxy method, or a graph like: + +![handlehttp](https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/handlehttp.png) + +This data reports the overall callgraph of execution for known traces, along +with how many of each function are currently running, the most running +concurrently (the highwater), how many were successful along with quantile +timing information, how many errors there were (with quantile timing +information if applicable), and how many panics there were. Since the Proxy +method isn't capturing a returned err value, and since HandleHTTP always +returns nil, this example won't ever have failures. + +If you're wondering about the success count being higher than you expected, +keep in mind your browser probably requested a favicon.ico. + +Cool, eh? + +## How it works + +```go +defer mon.Task()(&ctx)(&err) +``` + +is an interesting line of code - there's three function calls. 
If you look at +the Go spec, all of the function calls will run at the time the function starts +except for the very last one. + +The first function call, mon.Task(), creates or looks up a wrapper around a +Func. You could get this yourself by requesting mon.Func() inside of the +appropriate function or mon.FuncNamed(). Both mon.Task() and mon.Func() +are inspecting runtime.Caller to determine the name of the function. Because +this is a heavy operation, you can actually store the result of mon.Task() and +reuse it somehow else if you prefer, so instead of + +```go +func MyFunc(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) +} +``` + +you could instead use + +```go +var myFuncMon = mon.Task() + +func MyFunc(ctx context.Context) (err error) { + defer myFuncMon(&ctx)(&err) +} +``` + +which is more performant every time after the first time. runtime.Caller only +gets called once. + +Careful! Don't use the same myFuncMon in different functions unless you want to +screw up your statistics! + +The second function call starts all the various stop watches and bookkeeping to +keep track of the function. It also mutates the context pointer it's given to +extend the context with information about what current span (in Zipkin +parlance) is active. Notably, you *can* pass nil for the context if you really +don't want a context. You just lose callgraph information. + +The last function call stops all the stop watches ad makes a note of any +observed errors or panics (it repanics after observing them). + +## Tracing + +Turns out, we don't even need to change our program anymore to get rich tracing +information! + +Open your browser and go to `localhost:9000/trace/svg?regex=HandleHTTP`. It +won't load, and in fact, it's waiting for you to open another tab and refresh +`localhost:8080` again. Once you retrigger the actual application behavior, +the trace regex will capture a trace starting on the first function that +matches the supplied regex, and return an svg. 
Go back to your first tab, and +you should see a relatively uninteresting but super promising svg. + +Let's make the trace more interesting. Add a + +```go +time.Sleep(200 * time.Millisecond) +``` + +to your HandleHTTP method, rebuild, and restart. Load `localhost:8080`, then +start a new request to your trace URL, then reload `localhost:8080` again. Flip +back to your trace, and you should see that the Proxy method only takes a +portion of the time of HandleHTTP! + +![trace](https://cdn.rawgit.com/spacemonkeygo/monkit/master/images/trace.svg) + +There's multiple ways to select a trace. You can select by regex using the +preselect method (default), which first evaluates the regex on all known +functions for sanity checking. Sometimes, however, the function you want to +trace may not yet be known to monkit, in which case you'll want +to turn preselection off. You may have a bad regex, or you may be in this case +if you get the error "Bad Request: regex preselect matches 0 functions." + +Another way to select a trace is by providing a trace id, which we'll get to +next! + +Make sure to check out what the addition of the time.Sleep call did to the +other reports. + +## Plugins + +It's easy to write plugins for monkit! Check out our first one that exports +data to [Zipkin](http://zipkin.io/)'s Scribe API: + + * https://github.com/spacemonkeygo/monkit-zipkin + +We plan to have more (for HTrace, OpenTracing, etc, etc), soon! + +## License + +Copyright (C) 2016 Space Monkey, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/callers.go b/vendor/github.com/spacemonkeygo/monkit/v3/callers.go new file mode 100644 index 000000000..28830c6a7 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/callers.go @@ -0,0 +1,48 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "runtime" + "strings" +) + +func callerPackage(frames int) string { + var pc [1]uintptr + if runtime.Callers(frames+2, pc[:]) != 1 { + return "unknown" + } + frame, _ := runtime.CallersFrames(pc[:]).Next() + if frame.Func == nil { + return "unknown" + } + slash_pieces := strings.Split(frame.Func.Name(), "/") + dot_pieces := strings.SplitN(slash_pieces[len(slash_pieces)-1], ".", 2) + return strings.Join(slash_pieces[:len(slash_pieces)-1], "/") + "/" + dot_pieces[0] +} + +func callerFunc(frames int) string { + var pc [1]uintptr + if runtime.Callers(frames+3, pc[:]) != 1 { + return "unknown" + } + frame, _ := runtime.CallersFrames(pc[:]).Next() + if frame.Function == "" { + return "unknown" + } + slash_pieces := strings.Split(frame.Function, "/") + dot_pieces := strings.SplitN(slash_pieces[len(slash_pieces)-1], ".", 2) + return dot_pieces[len(dot_pieces)-1] +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/cas_safe.go b/vendor/github.com/spacemonkeygo/monkit/v3/cas_safe.go new 
file mode 100644 index 000000000..2260d08e9 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/cas_safe.go @@ -0,0 +1,75 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package monkit + +import "sync" + +// TODO(jeff): make this mutex smaller scoped, perhaps based on the arguments +// to compare and swap? +var bigHonkinMutex sync.Mutex + +func loadFunc(addr **Func) (s *Func) { + bigHonkinMutex.Lock() + s = *addr + bigHonkinMutex.Unlock() + return s +} + +func compareAndSwapFunc(addr **Func, old, new *Func) bool { + bigHonkinMutex.Lock() + val := *addr + if val == old { + *addr = new + bigHonkinMutex.Unlock() + return true + } + bigHonkinMutex.Unlock() + return false +} + +func loadTraceWatcherRef(addr **traceWatcherRef) (val *traceWatcherRef) { + bigHonkinMutex.Lock() + val = *addr + bigHonkinMutex.Unlock() + return val +} + +func storeTraceWatcherRef(addr **traceWatcherRef, val *traceWatcherRef) { + bigHonkinMutex.Lock() + *addr = val + bigHonkinMutex.Unlock() +} + +func compareAndSwapSpanObserverTuple(addr **spanObserverTuple, + old, new *spanObserverTuple) bool { + bigHonkinMutex.Lock() + val := *addr + if val == old { + *addr = new + bigHonkinMutex.Unlock() + return true + } + bigHonkinMutex.Unlock() + return false +} + +func loadSpanObserverTuple(addr **spanObserverTuple) (val *spanObserverTuple) { + bigHonkinMutex.Lock() + val = *addr + bigHonkinMutex.Unlock() + return 
val +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/cas_unsafe.go b/vendor/github.com/spacemonkeygo/monkit/v3/cas_unsafe.go new file mode 100644 index 000000000..380173772 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/cas_unsafe.go @@ -0,0 +1,70 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !appengine + +package monkit + +import ( + "sync/atomic" + "unsafe" +) + +// +// *Func atomic functions +// + +func loadFunc(addr **Func) (val *Func) { + return (*Func)(atomic.LoadPointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)))) +} + +func compareAndSwapFunc(addr **Func, old, new *Func) bool { + return atomic.CompareAndSwapPointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)), + unsafe.Pointer(old), + unsafe.Pointer(new)) +} + +// +// *traceWatcherRef atomic functions +// + +func loadTraceWatcherRef(addr **traceWatcherRef) (val *traceWatcherRef) { + return (*traceWatcherRef)(atomic.LoadPointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)))) +} + +func storeTraceWatcherRef(addr **traceWatcherRef, val *traceWatcherRef) { + atomic.StorePointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)), + unsafe.Pointer(val)) +} + +// +// *spanObserverTuple atomic functons +// + +func compareAndSwapSpanObserverTuple(addr **spanObserverTuple, + old, new *spanObserverTuple) bool { + return atomic.CompareAndSwapPointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)), + unsafe.Pointer(old), + unsafe.Pointer(new)) +} 
+ +func loadSpanObserverTuple(addr **spanObserverTuple) (val *spanObserverTuple) { + return (*spanObserverTuple)(atomic.LoadPointer( + (*unsafe.Pointer)(unsafe.Pointer(addr)))) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/counter.go b/vendor/github.com/spacemonkeygo/monkit/v3/counter.go new file mode 100644 index 000000000..4f6f2be58 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/counter.go @@ -0,0 +1,129 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "math" + "sync" +) + +// Counter keeps track of running totals, along with the highest and lowest +// values seen. The overall value can increment or decrement. Counter +// implements StatSource. Should be constructed with NewCounter(), though it +// may be more convenient to use the Counter accessor on a given Scope. 
+// Expected creation is like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// mon.Counter("beans").Inc(1) +// } +// +type Counter struct { + mtx sync.Mutex + val, low, high int64 + nonempty bool + key SeriesKey +} + +// NewCounter constructs a counter +func NewCounter(key SeriesKey) *Counter { + return &Counter{key: key} +} + +func (c *Counter) set(val int64) { + c.val = val + if !c.nonempty || val < c.low { + c.low = val + } + if !c.nonempty || c.high < val { + c.high = val + } + c.nonempty = true +} + +// Set will immediately change the value of the counter to whatever val is. It +// will appropriately update the high and low values, and return the former +// value. +func (c *Counter) Set(val int64) (former int64) { + c.mtx.Lock() + former = c.val + c.set(val) + c.mtx.Unlock() + return former +} + +// Inc will atomically increment the counter by delta and return the new value. +func (c *Counter) Inc(delta int64) (current int64) { + c.mtx.Lock() + c.set(c.val + delta) + current = c.val + c.mtx.Unlock() + return current +} + +// Dec will atomically decrement the counter by delta and return the new value. +func (c *Counter) Dec(delta int64) (current int64) { + return c.Inc(-delta) +} + +// High returns the highest value seen since construction or the last reset +func (c *Counter) High() (h int64) { + c.mtx.Lock() + h = c.high + c.mtx.Unlock() + return h +} + +// Low returns the lowest value seen since construction or the last reset +func (c *Counter) Low() (l int64) { + c.mtx.Lock() + l = c.low + c.mtx.Unlock() + return l +} + +// Current returns the current value +func (c *Counter) Current() (cur int64) { + c.mtx.Lock() + cur = c.val + c.mtx.Unlock() + return cur +} + +// Reset resets all values including high/low counters and returns what they +// were. 
+func (c *Counter) Reset() (val, low, high int64) { + c.mtx.Lock() + val, low, high = c.val, c.low, c.high + c.val, c.low, c.high, c.nonempty = 0, 0, 0, false + c.mtx.Unlock() + return val, low, high +} + +// Stats implements the StatSource interface +func (c *Counter) Stats(cb func(key SeriesKey, field string, val float64)) { + c.mtx.Lock() + val, low, high, nonempty := c.val, c.low, c.high, c.nonempty + c.mtx.Unlock() + if nonempty { + cb(c.key, "high", float64(high)) + cb(c.key, "low", float64(low)) + } else { + cb(c.key, "high", math.NaN()) + cb(c.key, "low", math.NaN()) + } + cb(c.key, "value", float64(val)) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go b/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go new file mode 100644 index 000000000..c0a36b02a --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/ctx.go @@ -0,0 +1,363 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "context" + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3/monotime" +) + +// Span represents a 'span' of execution. A span is analogous to a stack frame. +// Spans are constructed as a side-effect of Tasks. 
+type Span struct { + // sync/atomic things + mtx spinLock + + // immutable things from construction + id int64 + start time.Time + f *Func + trace *Trace + parent *Span + args []interface{} + context.Context + + // protected by mtx + done bool + orphaned bool + children spanBag + annotations []Annotation +} + +// SpanFromCtx loads the current Span from the given context. This assumes +// the context already had a Span created through a Task. +func SpanFromCtx(ctx context.Context) *Span { + if s, ok := ctx.(*Span); ok && s != nil { + return s + } else if s, ok := ctx.Value(spanKey).(*Span); ok && s != nil { + return s + } + return nil +} + +func newSpan(ctx context.Context, f *Func, args []interface{}, + id int64, trace *Trace) (sctx context.Context, exit func(*error)) { + + var s, parent *Span + if s, ok := ctx.(*Span); ok && s != nil { + ctx = s.Context + if trace == nil { + parent = s + trace = parent.trace + } + } else if s, ok := ctx.Value(spanKey).(*Span); ok && s != nil { + if trace == nil { + parent = s + trace = parent.trace + } + } else if trace == nil { + trace = NewTrace(id) + f.scope.r.observeTrace(trace) + } + + observer := trace.getObserver() + + s = &Span{ + id: id, + start: monotime.Now(), + f: f, + trace: trace, + parent: parent, + args: args, + Context: ctx, + } + + trace.incrementSpans() + + if parent != nil { + f.start(parent.f) + parent.addChild(s) + } else { + f.start(nil) + f.scope.r.rootSpanStart(s) + } + + sctx = s + if observer != nil { + sctx = observer.Start(sctx, s) + } + + return sctx, func(errptr *error) { + rec := recover() + panicked := rec != nil + + finish := monotime.Now() + + var err error + if errptr != nil { + err = *errptr + } + s.f.end(err, panicked, finish.Sub(s.start)) + + var children []*Span + s.mtx.Lock() + s.done = true + orphaned := s.orphaned + s.children.Iterate(func(child *Span) { + children = append(children, child) + }) + s.mtx.Unlock() + for _, child := range children { + child.orphan() + } + + if s.parent != 
nil { + s.parent.removeChild(s) + if orphaned { + s.f.scope.r.orphanEnd(s) + } + } else { + s.f.scope.r.rootSpanEnd(s) + } + + trace.decrementSpans() + + // Re-fetch the observer, in case the value has changed since newSpan + // was called + if observer := trace.getObserver(); observer != nil { + observer.Finish(sctx, s, err, panicked, finish) + } + + if panicked { + panic(rec) + } + } +} + +var taskSecret context.Context = &taskSecretT{} + +// Tasks are created (sometimes implicitly) from Funcs. A Task should be called +// at the start of a monitored task, and its return value should be called +// at the stop of said task. +type Task func(ctx *context.Context, args ...interface{}) func(*error) + +// Task returns a new Task for use, creating an associated Func if necessary. +// It also adds a new Span to the given ctx during execution. Expected usage +// like: +// +// var mon = monkit.Package() +// +// func MyFunc(ctx context.Context, arg1, arg2 string) (err error) { +// defer mon.Task()(&ctx, arg1, arg2)(&err) +// ... +// } +// +// or +// +// var ( +// mon = monkit.Package() +// funcTask = mon.Task() +// ) +// +// func MyFunc(ctx context.Context, arg1, arg2 string) (err error) { +// defer funcTask(&ctx, arg1, arg2)(&err) +// ... +// } +// +// Task allows you to include SeriesTags. WARNING: Each unique tag key/value +// combination creates a unique Func and a unique series. SeriesTags should +// only be used for low-cardinality values that you intentionally wish to +// result in a unique series. Example: +// +// func MyFunc(ctx context.Context, arg1, arg2 string) (err error) { +// defer mon.Task(monkit.NewSeriesTag("key1", "val1"))(&ctx)(&err) +// ... +// } +// +// Task uses runtime.Caller to determine the associated Func name. See +// TaskNamed if you want to supply your own name. See Func.Task if you already +// have a Func. 
+// +// If you want to control Trace creation, see Func.ResetTrace and +// Func.RemoteTrace +func (s *Scope) Task(tags ...SeriesTag) Task { + var initOnce sync.Once + var f *Func + init := func() { + f = s.FuncNamed(callerFunc(3), tags...) + } + return Task(func(ctx *context.Context, + args ...interface{}) func(*error) { + ctx = cleanCtx(ctx) + if ctx == &taskSecret && taskArgs(f, args) { + return nil + } + initOnce.Do(init) + s, exit := newSpan(*ctx, f, args, NewId(), nil) + if ctx != &unparented { + *ctx = s + } + return exit + }) +} + +// Task returns a new Task for use on this Func. It also adds a new Span to +// the given ctx during execution. +// +// var mon = monkit.Package() +// +// func MyFunc(ctx context.Context, arg1, arg2 string) (err error) { +// f := mon.Func() +// defer f.Task(&ctx, arg1, arg2)(&err) +// ... +// } +// +// It's more expected for you to use mon.Task directly. See RemoteTrace or +// ResetTrace if you want greater control over creating new traces. +func (f *Func) Task(ctx *context.Context, args ...interface{}) func(*error) { + ctx = cleanCtx(ctx) + if ctx == &taskSecret && taskArgs(f, args) { + return nil + } + s, exit := newSpan(*ctx, f, args, NewId(), nil) + if ctx != &unparented { + *ctx = s + } + return exit +} + +// RemoteTrace is like Func.Task, except you can specify the trace and span id. +// Needed for things like the Zipkin plugin. +func (f *Func) RemoteTrace(ctx *context.Context, spanId int64, trace *Trace, + args ...interface{}) func(*error) { + ctx = cleanCtx(ctx) + if trace != nil { + f.scope.r.observeTrace(trace) + } + s, exit := newSpan(*ctx, f, args, spanId, trace) + if ctx != &unparented { + *ctx = s + } + return exit +} + +// ResetTrace is like Func.Task, except it always creates a new Trace. 
+func (f *Func) ResetTrace(ctx *context.Context, + args ...interface{}) func(*error) { + ctx = cleanCtx(ctx) + if ctx == &taskSecret && taskArgs(f, args) { + return nil + } + trace := NewTrace(NewId()) + f.scope.r.observeTrace(trace) + s, exit := newSpan(*ctx, f, args, trace.Id(), trace) + if ctx != &unparented { + *ctx = s + } + return exit +} + +var unparented = context.Background() + +func cleanCtx(ctx *context.Context) *context.Context { + if ctx == nil { + return &unparented + } + if *ctx == nil { + *ctx = context.Background() + // possible upshot of what we just did: + // + // func MyFunc(ctx context.Context) { + // // ctx == nil here + // defer mon.Task()(&ctx)(nil) + // // ctx != nil here + // } + // + // func main() { MyFunc(nil) } + // + } + return ctx +} + +// SpanCtxObserver is the interface plugins must implement if they want to observe +// all spans on a given trace as they happen, or add to contexts as they +// pass through mon.Task()(&ctx)(&err) calls. +type SpanCtxObserver interface { + // Start is called when a Span starts. Start should return the context + // this span should use going forward. ctx is the context it is currently + // using. + Start(ctx context.Context, s *Span) context.Context + + // Finish is called when a Span finishes, along with an error if any, whether + // or not it panicked, and what time it finished. 
+ Finish(ctx context.Context, s *Span, err error, panicked bool, finish time.Time) +} + +type spanObserverToSpanCtxObserver struct { + observer SpanObserver +} + +func (so spanObserverToSpanCtxObserver) Start(ctx context.Context, s *Span) context.Context { + so.observer.Start(s) + return ctx +} + +func (so spanObserverToSpanCtxObserver) Finish(ctx context.Context, s *Span, err error, panicked bool, finish time.Time) { + so.observer.Finish(s, err, panicked, finish) +} + +type spanObserverTuple struct { + // cdr is atomic + cdr *spanObserverTuple + // car never changes + car SpanCtxObserver +} + +func (l *spanObserverTuple) Start(ctx context.Context, s *Span) context.Context { + ctx = l.car.Start(ctx, s) + cdr := loadSpanObserverTuple(&l.cdr) + if cdr != nil { + ctx = cdr.Start(ctx, s) + } + return ctx +} + +func (l *spanObserverTuple) Finish(ctx context.Context, s *Span, err error, panicked bool, + finish time.Time) { + l.car.Finish(ctx, s, err, panicked, finish) + cdr := loadSpanObserverTuple(&l.cdr) + if cdr != nil { + cdr.Finish(ctx, s, err, panicked, finish) + } +} + +type resetContext struct { + context.Context +} + +func (r resetContext) Value(key interface{}) interface{} { + if key == spanKey { + return nil + } + return r.Context.Value(key) +} + +// ResetContextSpan returns a new context with Span information removed. +func ResetContextSpan(ctx context.Context) context.Context { + return resetContext{Context: ctx} +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/dist.go b/vendor/github.com/spacemonkeygo/monkit/v3/dist.go new file mode 100644 index 000000000..e3f24e432 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/dist.go @@ -0,0 +1,61 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "time" +) + +const ( + ReservoirSize = 64 +) + +var ( + // If Window is > 0, the probability of replacing a datapoint will never + // fall below ReservoirSize/Window instead of continuing to fall over time. + // Window should be a multiple of ReservoirSize. + Window int64 = 1024 +) + +// ObservedQuantiles is the set of quantiles the internal distribution +// measurement logic will try to optimize for, if applicable. +var ObservedQuantiles = []float64{0, .1, .25, .5, .75, .9, .95, 1} + +type float32Slice []float32 + +func (p float32Slice) Len() int { return len(p) } +func (p float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p float32Slice) Less(i, j int) bool { + // N.B.: usually, float comparisons should check if either value is NaN, but + // in this package's usage, they never are here. 
+ return p[i] < p[j] +} + +//go:generate sh -c "m4 -D_IMPORT_='\"time\"' -D_NAME_=Duration -D_LOWER_NAME_=duration -D_TYPE_=time.Duration distgen.go.m4 > durdist.go" +//go:generate sh -c "m4 -D_IMPORT_= -D_NAME_=Float -D_LOWER_NAME_=float -D_TYPE_=float64 distgen.go.m4 > floatdist.go" +//go:generate sh -c "m4 -D_IMPORT_= -D_NAME_=Int -D_LOWER_NAME_=int -D_TYPE_=int64 distgen.go.m4 > intdist.go" +//go:generate gofmt -w -s durdist.go floatdist.go intdist.go + +func (d *DurationDist) toFloat64(v time.Duration) float64 { + return v.Seconds() +} + +func (d *IntDist) toFloat64(v int64) float64 { + return float64(v) +} + +func (d *FloatDist) toFloat64(v float64) float64 { + return v +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/distgen.go.m4 b/vendor/github.com/spacemonkeygo/monkit/v3/distgen.go.m4 new file mode 100644 index 000000000..1d01dc315 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/distgen.go.m4 @@ -0,0 +1,185 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// WARNING: THE NON-M4 VERSIONS OF THIS FILE ARE GENERATED BY GO GENERATE! +// ONLY MAKE CHANGES TO THE M4 FILE +// + +package monkit + +import ( + "sort" + _IMPORT_ +) + +// _NAME_`Dist' keeps statistics about values such as +// low/high/recent/average/quantiles. Not threadsafe. Construct with +// `New'_NAME_`Dist'(). Fields are expected to be read from but not written to. 
+type _NAME_`Dist' struct { + // Low and High are the lowest and highest values observed since + // construction or the last reset. + Low, High _TYPE_ + + // Recent is the last observed value. + Recent _TYPE_ + + // Count is the number of observed values since construction or the last + // reset. + Count int64 + + // Sum is the sum of all the observed values since construction or the last + // reset. + Sum _TYPE_ + + key SeriesKey + reservoir [ReservoirSize]float32 + rng xorshift128 + sorted bool +} + +func `init'_NAME_`Dist'(v *_NAME_`Dist', key SeriesKey) { + v.key = key + v.rng = newXORShift128() +} + +// `New'_NAME_`Dist' creates a distribution of _TYPE_`s'. +func `New'_NAME_`Dist'(key SeriesKey) (d *_NAME_`Dist') { + d = &_NAME_`Dist'{} + `init'_NAME_`Dist'(d, key) + return d +} + +// Insert adds a value to the distribution, updating appropriate values. +func (d *_NAME_`Dist') Insert(val _TYPE_) { + if d.Count != 0 { + if val < d.Low { + d.Low = val + } + if val > d.High { + d.High = val + } + } else { + d.Low = val + d.High = val + } + d.Recent = val + d.Sum += val + + index := d.Count + d.Count += 1 + + if index < ReservoirSize { + d.reservoir[index] = float32(val) + d.sorted = false + } else { + window := d.Count + // careful, the capitalization of Window is important + if Window > 0 && window > Window { + window = Window + } + // fast, but kind of biased. probably okay + j := d.rng.Uint64() % uint64(window) + if j < ReservoirSize { + d.reservoir[int(j)] = float32(val) + d.sorted = false + } + } +} + +// FullAverage calculates and returns the average of all inserted values. +func (d *_NAME_`Dist') FullAverage() _TYPE_ { + if d.Count > 0 { + return d.Sum / _TYPE_`(d.Count)' + } + return 0 +} + +// ReservoirAverage calculates the average of the current reservoir. 
+func (d *_NAME_`Dist') ReservoirAverage() _TYPE_ { + amount := ReservoirSize + if d.Count < int64(amount) { + amount = int(d.Count) + } + if amount <= 0 { + return 0 + } + var sum float32 + for i := 0; i < amount; i++ { + sum += d.reservoir[i] + } + return _TYPE_`(sum / float32(amount))' +} + +// Query will return the approximate value at the given quantile from the +// reservoir, where 0 <= quantile <= 1. +func (d *_NAME_`Dist') Query(quantile float64) _TYPE_ { + rlen := int(ReservoirSize) + if int64(rlen) > d.Count { + rlen = int(d.Count) + } + + if rlen < 2 { + return _TYPE_`(d.reservoir[0])' + } + + reservoir := d.reservoir[:rlen] + if !d.sorted { + sort.Sort(float32Slice(reservoir)) + d.sorted = true + } + + if quantile <= 0 { + return _TYPE_`(reservoir[0])' + } + if quantile >= 1 { + return _TYPE_`(reservoir[rlen-1])' + } + + idx_float := quantile * float64(rlen-1) + idx := int(idx_float) + + diff := idx_float - float64(idx) + prior := float64(reservoir[idx]) + return _TYPE_`(prior + diff*(float64(reservoir[idx+1])-prior))' +} + +// Copy returns a full copy of the entire distribution. 
+func (d *_NAME_`Dist') Copy() *_NAME_`Dist' { + cp := *d + cp.rng = newXORShift128() + return &cp +} + +func (d *_NAME_`Dist') Reset() { + d.Low, d.High, d.Recent, d.Count, d.Sum = 0, 0, 0, 0, 0 + // resetting count will reset the quantile reservoir +} + +func (d *_NAME_`Dist') Stats(cb func(key SeriesKey, field string, val float64)) { + count := d.Count + cb(d.key, "count", float64(count)) + if count > 0 { + cb(d.key, "sum", d.toFloat64(d.Sum)) + cb(d.key, "min", d.toFloat64(d.Low)) + cb(d.key, "avg", d.toFloat64(d.FullAverage())) + cb(d.key, "max", d.toFloat64(d.High)) + cb(d.key, "rmin", d.toFloat64(d.Query(0))) + cb(d.key, "ravg", d.toFloat64(d.ReservoirAverage())) + cb(d.key, "r10", d.toFloat64(d.Query(.1))) + cb(d.key, "r50", d.toFloat64(d.Query(.5))) + cb(d.key, "r90", d.toFloat64(d.Query(.9))) + cb(d.key, "rmax", d.toFloat64(d.Query(1))) + cb(d.key, "recent", d.toFloat64(d.Recent)) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/doc.go b/vendor/github.com/spacemonkeygo/monkit/v3/doc.go new file mode 100644 index 000000000..6bf0d5dd1 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/doc.go @@ -0,0 +1,579 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package monkit is a flexible code instrumenting and data collection library. + +I'm going to try and sell you as fast as I can on this library. 
+ +Example usage + + package main + + import ( + "context" + "fmt" + "log" + "net/http" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/spacemonkeygo/monkit/v3/environment" + "github.com/spacemonkeygo/monkit/v3/present" + ) + + var ( + mon = monkit.Package() + ) + + func ComputeThing(ctx context.Context, arg1, arg2 int) (res int, err error) { + defer mon.Task()(&ctx)(&err) + + timer := mon.Timer("subcomputation").Start() + res = arg1 + arg2 + timer.Stop() + + if res == 3 { + mon.Event("hit 3") + } + + mon.BoolVal("was-4").Observe(res == 4) + mon.IntVal("res").Observe(int64(res)) + mon.Counter("calls").Inc(1) + mon.Gauge("arg1", func() float64 { return float64(arg1) }) + mon.Meter("arg2").Mark(arg2) + + return arg1 + arg2, nil + } + + func DoStuff(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + + result, err := ComputeThing(ctx, 1, 2) + if err != nil { + return err + } + + fmt.Println(result) + return + } + + func main() { + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) + log.Println(DoStuff(context.Background())) + } + +Metrics + +We've got tools that capture distribution information (including quantiles) +about int64, float64, and bool types. We have tools that capture data about +events (we've got meters for deltas, rates, etc). We have rich tools for +capturing information about tasks and functions, and literally anything that +can generate a name and a number. + +Almost just as importantly, the amount of boilerplate and code you have to +write to get these features is very minimal. Data that's hard to measure +probably won't get measured. + +This data can be collected and sent to Graphite (http://graphite.wikidot.com/) +or any other time-series database. 
+ +Here's a selection of live stats from one of our storage nodes: + + env.os.fds 120.000000 + env.os.proc.stat.Minflt 81155.000000 + env.os.proc.stat.Cminflt 11789.000000 + env.os.proc.stat.Majflt 10.000000 + env.os.proc.stat.Cmajflt 6.000000 + ... + + env.process.control 1.000000 + env.process.crc 3819014369.000000 + env.process.uptime 163225.292925 + env.runtime.goroutines 52.000000 + env.runtime.memory.Alloc 2414080.000000 + ... + + env.rusage.Maxrss 26372.000000 + ... + + sm/flud/csl/client.(*CSLClient).Verify.current 0.000000 + sm/flud/csl/client.(*CSLClient).Verify.success 788.000000 + sm/flud/csl/client.(*CSLClient).Verify.error volume missing 91.000000 + sm/flud/csl/client.(*CSLClient).Verify.error dial error 1.000000 + sm/flud/csl/client.(*CSLClient).Verify.panics 0.000000 + sm/flud/csl/client.(*CSLClient).Verify.success times min 0.102214 + sm/flud/csl/client.(*CSLClient).Verify.success times avg 1.899133 + sm/flud/csl/client.(*CSLClient).Verify.success times max 8.601230 + sm/flud/csl/client.(*CSLClient).Verify.success times recent 2.673128 + sm/flud/csl/client.(*CSLClient).Verify.failure times min 0.682881 + sm/flud/csl/client.(*CSLClient).Verify.failure times avg 3.936571 + sm/flud/csl/client.(*CSLClient).Verify.failure times max 6.102318 + sm/flud/csl/client.(*CSLClient).Verify.failure times recent 2.208020 + sm/flud/csl/server.store.avg 710800.000000 + sm/flud/csl/server.store.count 271.000000 + sm/flud/csl/server.store.max 3354194.000000 + sm/flud/csl/server.store.min 467.000000 + sm/flud/csl/server.store.recent 1661376.000000 + sm/flud/csl/server.store.sum 192626890.000000 + ... + +Call graphs + +This library generates call graphs of your live process for you. + +These call graphs aren't created through sampling. 
They're full pictures of all +of the interesting functions you've annotated, along with quantile information +about their successes, failures, how often they panic, return an error (if so +instrumented), how many are currently running, etc. + +The data can be returned in dot format, in json, in text, and can be about +just the functions that are currently executing, or all the functions the +monitoring system has ever seen. + +Here's another example of one of our production nodes: + +https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/callgraph2.png + +Trace graphs + +This library generates trace graphs of your live process for you directly, +without requiring standing up some tracing system such as Zipkin (though you +can do that too). + +Inspired by Google's Dapper (http://research.google.com/pubs/pub36356.html) +and Twitter's Zipkin (http://zipkin.io), we have process-internal trace +graphs, triggerable by a number of different methods. + +You get this trace information for free whenever you use +Go contexts (https://blog.golang.org/context) and function monitoring. The +output formats are svg and json. + +Additionally, the library supports trace observation plugins, and we've written +a plugin that sends this data to Zipkin (http://github.com/spacemonkeygo/monkit-zipkin). + +https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/trace.png + +History + +Before our crazy Go rewrite of everything (https://www.spacemonkey.com/blog/posts/go-space-monkey) +(and before we had even seen Google's Dapper paper), we were a Python shop, and +all of our "interesting" functions were decorated with a helper that collected +timing information and sent it to Graphite. + +When we transliterated to Go, we wanted to preserve that functionality, so the +first version of our monitoring package was born. + +Over time it started to get janky, especially as we found Zipkin and started +adding tracing functionality to it. 
We rewrote all of our Go code to use Google +contexts, and then realized we could get call graph information. We decided a +refactor and then an all-out rethinking of our monitoring package was best, +and so now we have this library. + +Aside about contexts + +Sometimes you really want callstack contextual information without having to +pass arguments through everything on the call stack. In other languages, many +people implement this with thread-local storage. + +Example: let's say you have written a big system that responds to user +requests. All of your libraries log using your log library. During initial +development everything is easy to debug, since there's low user load, but now +you've scaled and there's OVER TEN USERS and it's kind of hard to tell what log +lines were caused by what. Wouldn't it be nice to add request ids to all of the +log lines kicked off by that request? Then you could grep for all log lines +caused by a specific request id. Geez, it would suck to have to pass all +contextual debugging information through all of your callsites. + +Google solved this problem by always passing a context.Context interface +through from call to call. A Context is basically just a mapping of arbitrary +keys to arbitrary values that users can add new values for. This way if you +decide to add a request context, you can add it to your Context and then all +callsites that decend from that place will have the new data in their contexts. + +It is admittedly very verbose to add contexts to every function call. +Painfully so. I hope to write more about it in the future, but Google also +wrote up their thoughts about it (https://blog.golang.org/context), which you +can go read. For now, just swallow your disgust and let's keep moving. + +Motivating program + +Let's make a super simple Varnish (https://www.varnish-cache.org/) clone. +Open up gedit! (Okay just kidding, open whatever text editor you want.) 
+ +For this motivating program, we won't even add the caching, though there's +comments for where to add it if you'd like. For now, let's just make a +barebones system that will proxy HTTP requests. We'll call it VLite, but +maybe we should call it VReallyLite. + + package main + + import ( + "flag" + "net/http" + "net/http/httputil" + "net/url" + ) + + type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy + } + + func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } + } + + func (v *VLite) Proxy(w http.ResponseWriter, r *http.Request) { + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) + } + + func (v *VLite) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // here's where you'd put caching logic + v.Proxy(w, r) + } + + func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + panic(http.ListenAndServe(":8080", NewVLite(targetURL))) + } + +Run and build this and open localhost:8080 in your browser. If you use the +default proxy target, it should inform you that the world hasn't been +destroyed yet. + +Adding basic instrumentation + +The first thing you'll want to do is add the small amount of boilerplate to +make the instrumentation we're going to add to your process observable later. 
+ +Import the basic monkit packages: + + "github.com/spacemonkeygo/monkit/v3" + "github.com/spacemonkeygo/monkit/v3/environment" + "github.com/spacemonkeygo/monkit/v3/present" + +and then register environmental statistics and kick off a goroutine in your +main method to serve debug requests: + + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) + +Rebuild, and then check out localhost:9000/stats (or +localhost:9000/stats/json, if you prefer) in your browser! + +Request contexts + +Remember what I said about Google's contexts (https://blog.golang.org/context)? +It might seem a bit overkill for such a small project, but it's time to add +them. + +To help out here, I've created a library that constructs contexts for you +for incoming HTTP requests. Nothing that's about to happen requires my +webhelp library (https://godoc.org/github.com/jtolds/webhelp), but here is the +code now refactored to receive and pass contexts through our two per-request +calls. 
+ + package main + + import ( + "context" + "flag" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/jtolds/webhelp" + "github.com/spacemonkeygo/monkit/v3" + "github.com/spacemonkeygo/monkit/v3/environment" + "github.com/spacemonkeygo/monkit/v3/present" + ) + + type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy + } + + func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } + } + + func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) + } + + func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) error { + // here's where you'd put caching logic + v.Proxy(ctx, w, r) + return nil + } + + func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) + panic(webhelp.ListenAndServe(":8080", NewVLite(targetURL))) + } + +You can create a new context for a request however you want. One reason to use +something like webhelp is that the cancelation feature of Contexts is hooked +up to the HTTP request getting canceled. + +Monitor some requests + +Let's start to get statistics about how many requests we receive! First, this +package (main) will need to get a monitoring Scope. 
Add this global definition +right after all your imports, much like you'd create a logger with many logging +libraries: + + var mon = monkit.Package() + +Now, make the error return value of HandleHTTP named (so, (err error)), and add +this defer line as the very first instruction of HandleHTTP: + + func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) (err error) { + defer mon.Task()(&ctx)(&err) + +Let's also add the same line (albeit modified for the lack of error) to +Proxy, replacing &err with nil: + + func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + defer mon.Task()(&ctx)(nil) + +You should now have something like: + + package main + + import ( + "context" + "flag" + "net/http" + "net/http/httputil" + "net/url" + + "github.com/jtolds/webhelp" + "github.com/spacemonkeygo/monkit/v3" + "github.com/spacemonkeygo/monkit/v3/environment" + "github.com/spacemonkeygo/monkit/v3/present" + ) + + var mon = monkit.Package() + + type VLite struct { + target *url.URL + proxy *httputil.ReverseProxy + } + + func NewVLite(target *url.URL) *VLite { + return &VLite{ + target: target, + proxy: httputil.NewSingleHostReverseProxy(target), + } + } + + func (v *VLite) Proxy(ctx context.Context, w http.ResponseWriter, r *http.Request) { + defer mon.Task()(&ctx)(nil) + r.Host = v.target.Host // let the proxied server get the right vhost + v.proxy.ServeHTTP(w, r) + } + + func (v *VLite) HandleHTTP(ctx context.Context, w webhelp.ResponseWriter, r *http.Request) (err error) { + defer mon.Task()(&ctx)(&err) + // here's where you'd put caching logic + v.Proxy(ctx, w, r) + return nil + } + + func main() { + target := flag.String( + "proxy", + "http://hasthelargehadroncolliderdestroyedtheworldyet.com/", + "server to cache") + flag.Parse() + targetURL, err := url.Parse(*target) + if err != nil { + panic(err) + } + environment.Register(monkit.Default) + go http.ListenAndServe("localhost:9000", present.HTTP(monkit.Default)) 
+ panic(webhelp.ListenAndServe(":8080", NewVLite(targetURL))) + } + +We'll unpack what's going on here, but for now: + + * Rebuild and restart! + * Trigger a full refresh at localhost:8080 to make sure your new HTTP + handler runs + * Visit localhost:9000/stats and then localhost:9000/funcs + +For this new funcs dataset, if you want a graph, you can download a dot +graph at localhost:9000/funcs/dot and json information from +localhost:9000/funcs/json. + +You should see something like: + + [3693964236144930897] main.(*VLite).HandleHTTP + parents: entry + current: 0, highwater: 1, success: 2, errors: 0, panics: 0 + success times: + 0.00: 63.930436ms + 0.10: 70.482159ms + 0.25: 80.309745ms + 0.50: 96.689054ms + 0.75: 113.068363ms + 0.90: 122.895948ms + 0.95: 126.17181ms + 1.00: 129.447675ms + avg: 96.689055ms + failure times: + 0.00: 0 + 0.10: 0 + 0.25: 0 + 0.50: 0 + 0.75: 0 + 0.90: 0 + 0.95: 0 + 1.00: 0 + avg: 0 + +with a similar report for the Proxy method, or a graph like: + +https://raw.githubusercontent.com/spacemonkeygo/monkit/master/images/handlehttp.png + +This data reports the overall callgraph of execution for known traces, along +with how many of each function are currently running, the most running +concurrently (the highwater), how many were successful along with quantile +timing information, how many errors there were (with quantile timing +information if applicable), and how many panics there were. Since the Proxy +method isn't capturing a returned err value, and since HandleHTTP always +returns nil, this example won't ever have failures. + +If you're wondering about the success count being higher than you expected, +keep in mind your browser probably requested a favicon.ico. + +Cool, eh? + +How it works + + defer mon.Task()(&ctx)(&err) + +is an interesting line of code - there's three function calls. If you look at +the Go spec, all of the function calls will run at the time the function starts +except for the very last one. 
+ +The first function call, mon.Task(), creates or looks up a wrapper around a +Func. You could get this yourself by requesting mon.Func() inside of the +appropriate function or mon.FuncNamed(). Both mon.Task() and mon.Func() +are inspecting runtime.Caller to determine the name of the function. Because +this is a heavy operation, you can actually store the result of mon.Task() and +reuse it somehow else if you prefer, so instead of + + func MyFunc(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + } + +you could instead use + + var myFuncMon = mon.Task() + + func MyFunc(ctx context.Context) (err error) { + defer myFuncMon(&ctx)(&err) + } + +which is more performant every time after the first time. runtime.Caller only +gets called once. + +Careful! Don't use the same myFuncMon in different functions unless you want to +screw up your statistics! + +The second function call starts all the various stop watches and bookkeeping to +keep track of the function. It also mutates the context pointer it's given to +extend the context with information about what current span (in Zipkin +parlance) is active. Notably, you *can* pass nil for the context if you really +don't want a context. You just lose callgraph information. + +The last function call stops all the stop watches ad makes a note of any +observed errors or panics (it repanics after observing them). + +Tracing + +Turns out, we don't even need to change our program anymore to get rich tracing +information! + +Open your browser and go to localhost:9000/trace/svg?regex=HandleHTTP. It +won't load, and in fact, it's waiting for you to open another tab and refresh +localhost:8080 again. Once you retrigger the actual application behavior, +the trace regex will capture a trace starting on the first function that +matches the supplied regex, and return an svg. Go back to your first tab, and +you should see a relatively uninteresting but super promising svg. + +Let's make the trace more interesting. 
Add a + + time.Sleep(200 * time.Millisecond) + +to your HandleHTTP method, rebuild, and restart. Load localhost:8080, then +start a new request to your trace URL, then reload localhost:8080 again. Flip +back to your trace, and you should see that the Proxy method only takes a +portion of the time of HandleHTTP! + +https://cdn.rawgit.com/spacemonkeygo/monkit/master/images/trace.svg + +There's multiple ways to select a trace. You can select by regex using the +preselect method (default), which first evaluates the regex on all known +functions for sanity checking. Sometimes, however, the function you want to +trace may not yet be known to monkit, in which case you'll want +to turn preselection off. You may have a bad regex, or you may be in this case +if you get the error "Bad Request: regex preselect matches 0 functions." + +Another way to select a trace is by providing a trace id, which we'll get to +next! + +Make sure to check out what the addition of the time.Sleep call did to the +other reports. + +Plugins + +It's easy to write plugins for monkit! Check out our first one that exports +data to Zipkin (http://zipkin.io/)'s Scribe API: + +https://github.com/spacemonkeygo/monkit-zipkin + +We plan to have more (for HTrace, OpenTracing, etc, etc), soon! + +*/ +package monkit // import "github.com/spacemonkeygo/monkit/v3" diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/durdist.go b/vendor/github.com/spacemonkeygo/monkit/v3/durdist.go new file mode 100644 index 000000000..f551669dd --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/durdist.go @@ -0,0 +1,185 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// WARNING: THE NON-M4 VERSIONS OF THIS FILE ARE GENERATED BY GO GENERATE! +// ONLY MAKE CHANGES TO THE M4 FILE +// + +package monkit + +import ( + "sort" + "time" +) + +// DurationDist keeps statistics about values such as +// low/high/recent/average/quantiles. Not threadsafe. Construct with +// NewDurationDist(). Fields are expected to be read from but not written to. +type DurationDist struct { + // Low and High are the lowest and highest values observed since + // construction or the last reset. + Low, High time.Duration + + // Recent is the last observed value. + Recent time.Duration + + // Count is the number of observed values since construction or the last + // reset. + Count int64 + + // Sum is the sum of all the observed values since construction or the last + // reset. + Sum time.Duration + + key SeriesKey + reservoir [ReservoirSize]float32 + rng xorshift128 + sorted bool +} + +func initDurationDist(v *DurationDist, key SeriesKey) { + v.key = key + v.rng = newXORShift128() +} + +// NewDurationDist creates a distribution of time.Durations. +func NewDurationDist(key SeriesKey) (d *DurationDist) { + d = &DurationDist{} + initDurationDist(d, key) + return d +} + +// Insert adds a value to the distribution, updating appropriate values. 
+func (d *DurationDist) Insert(val time.Duration) { + if d.Count != 0 { + if val < d.Low { + d.Low = val + } + if val > d.High { + d.High = val + } + } else { + d.Low = val + d.High = val + } + d.Recent = val + d.Sum += val + + index := d.Count + d.Count += 1 + + if index < ReservoirSize { + d.reservoir[index] = float32(val) + d.sorted = false + } else { + window := d.Count + // careful, the capitalization of Window is important + if Window > 0 && window > Window { + window = Window + } + // fast, but kind of biased. probably okay + j := d.rng.Uint64() % uint64(window) + if j < ReservoirSize { + d.reservoir[int(j)] = float32(val) + d.sorted = false + } + } +} + +// FullAverage calculates and returns the average of all inserted values. +func (d *DurationDist) FullAverage() time.Duration { + if d.Count > 0 { + return d.Sum / time.Duration(d.Count) + } + return 0 +} + +// ReservoirAverage calculates the average of the current reservoir. +func (d *DurationDist) ReservoirAverage() time.Duration { + amount := ReservoirSize + if d.Count < int64(amount) { + amount = int(d.Count) + } + if amount <= 0 { + return 0 + } + var sum float32 + for i := 0; i < amount; i++ { + sum += d.reservoir[i] + } + return time.Duration(sum / float32(amount)) +} + +// Query will return the approximate value at the given quantile from the +// reservoir, where 0 <= quantile <= 1. 
+func (d *DurationDist) Query(quantile float64) time.Duration { + rlen := int(ReservoirSize) + if int64(rlen) > d.Count { + rlen = int(d.Count) + } + + if rlen < 2 { + return time.Duration(d.reservoir[0]) + } + + reservoir := d.reservoir[:rlen] + if !d.sorted { + sort.Sort(float32Slice(reservoir)) + d.sorted = true + } + + if quantile <= 0 { + return time.Duration(reservoir[0]) + } + if quantile >= 1 { + return time.Duration(reservoir[rlen-1]) + } + + idx_float := quantile * float64(rlen-1) + idx := int(idx_float) + + diff := idx_float - float64(idx) + prior := float64(reservoir[idx]) + return time.Duration(prior + diff*(float64(reservoir[idx+1])-prior)) +} + +// Copy returns a full copy of the entire distribution. +func (d *DurationDist) Copy() *DurationDist { + cp := *d + cp.rng = newXORShift128() + return &cp +} + +func (d *DurationDist) Reset() { + d.Low, d.High, d.Recent, d.Count, d.Sum = 0, 0, 0, 0, 0 + // resetting count will reset the quantile reservoir +} + +func (d *DurationDist) Stats(cb func(key SeriesKey, field string, val float64)) { + count := d.Count + cb(d.key, "count", float64(count)) + if count > 0 { + cb(d.key, "sum", d.toFloat64(d.Sum)) + cb(d.key, "min", d.toFloat64(d.Low)) + cb(d.key, "avg", d.toFloat64(d.FullAverage())) + cb(d.key, "max", d.toFloat64(d.High)) + cb(d.key, "rmin", d.toFloat64(d.Query(0))) + cb(d.key, "ravg", d.toFloat64(d.ReservoirAverage())) + cb(d.key, "r10", d.toFloat64(d.Query(.1))) + cb(d.key, "r50", d.toFloat64(d.Query(.5))) + cb(d.key, "r90", d.toFloat64(d.Query(.9))) + cb(d.key, "rmax", d.toFloat64(d.Query(1))) + cb(d.key, "recent", d.toFloat64(d.Recent)) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/error_names.go b/vendor/github.com/spacemonkeygo/monkit/v3/error_names.go new file mode 100644 index 000000000..01d16946f --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/error_names.go @@ -0,0 +1,114 @@ +// Copyright (C) 2017 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "context" + "io" + "net" + "os" + "sync" + "sync/atomic" +) + +// errorNameHandlers keeps track of the list of error name handlers monkit will +// call to give errors good metric names. +var errorNameHandlers struct { + write_mu sync.Mutex + value atomic.Value +} + +// AddErrorNameHandler adds an error name handler function that will be +// consulted every time an error is captured for a task. The handlers will be +// called in the order they were registered with the most recently added +// handler first, until a handler returns true for the second return value. +// If no handler returns true, the error is checked to see if it implements +// an interface that allows it to name itself, and otherwise, monkit attempts +// to find a good name for most built in Go standard library errors. +func AddErrorNameHandler(f func(error) (string, bool)) { + errorNameHandlers.write_mu.Lock() + defer errorNameHandlers.write_mu.Unlock() + + handlers, _ := errorNameHandlers.value.Load().([]func(error) (string, bool)) + handlers = append(handlers, f) + errorNameHandlers.value.Store(handlers) +} + +// getErrorName implements the logic described in the AddErrorNameHandler +// function. 
+func getErrorName(err error) string { + // check if any of the handlers will handle it + handlers, _ := errorNameHandlers.value.Load().([]func(error) (string, bool)) + for i := len(handlers) - 1; i >= 0; i-- { + if name, ok := handlers[i](err); ok { + return name + } + } + + // check if it knows how to name itself + type namer interface { + Name() (string, bool) + } + + if n, ok := err.(namer); ok { + if name, ok := n.Name(); ok { + return name + } + } + + // check if it's a known error that we handle to give good names + switch err { + case io.EOF: + return "EOF" + case io.ErrUnexpectedEOF: + return "Unexpected EOF Error" + case io.ErrClosedPipe: + return "Closed Pipe Error" + case io.ErrNoProgress: + return "No Progress Error" + case io.ErrShortBuffer: + return "Short Buffer Error" + case io.ErrShortWrite: + return "Short Write Error" + case context.Canceled: + return "Canceled" + case context.DeadlineExceeded: + return "Timeout" + } + if isErrnoError(err) { + return "Errno" + } + switch err.(type) { + case *os.SyscallError: + return "Syscall Error" + case net.UnknownNetworkError: + return "Unknown Network Error" + case *net.AddrError: + return "Addr Error" + case net.InvalidAddrError: + return "Invalid Addr Error" + case *net.OpError: + return "Net Op Error" + case *net.ParseError: + return "Net Parse Error" + case *net.DNSError: + return "DNS Error" + case *net.DNSConfigError: + return "DNS Config Error" + case net.Error: + return "Network Error" + } + return "System Error" +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/error_names_ae.go b/vendor/github.com/spacemonkeygo/monkit/v3/error_names_ae.go new file mode 100644 index 000000000..379a2340a --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/error_names_ae.go @@ -0,0 +1,21 @@ +// Copyright (C) 2017 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build appengine + +package monkit + +func isErrnoError(err error) bool { + return false +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/error_names_syscall.go b/vendor/github.com/spacemonkeygo/monkit/v3/error_names_syscall.go new file mode 100644 index 000000000..e42a7957b --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/error_names_syscall.go @@ -0,0 +1,24 @@ +// Copyright (C) 2017 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !appengine + +package monkit + +import "syscall" + +func isErrnoError(err error) bool { + _, ok := err.(syscall.Errno) + return ok +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/floatdist.go b/vendor/github.com/spacemonkeygo/monkit/v3/floatdist.go new file mode 100644 index 000000000..59e677a4c --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/floatdist.go @@ -0,0 +1,184 @@ +// Copyright (C) 2016 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// WARNING: THE NON-M4 VERSIONS OF THIS FILE ARE GENERATED BY GO GENERATE! +// ONLY MAKE CHANGES TO THE M4 FILE +// + +package monkit + +import ( + "sort" +) + +// FloatDist keeps statistics about values such as +// low/high/recent/average/quantiles. Not threadsafe. Construct with +// NewFloatDist(). Fields are expected to be read from but not written to. +type FloatDist struct { + // Low and High are the lowest and highest values observed since + // construction or the last reset. + Low, High float64 + + // Recent is the last observed value. + Recent float64 + + // Count is the number of observed values since construction or the last + // reset. + Count int64 + + // Sum is the sum of all the observed values since construction or the last + // reset. + Sum float64 + + key SeriesKey + reservoir [ReservoirSize]float32 + rng xorshift128 + sorted bool +} + +func initFloatDist(v *FloatDist, key SeriesKey) { + v.key = key + v.rng = newXORShift128() +} + +// NewFloatDist creates a distribution of float64s. +func NewFloatDist(key SeriesKey) (d *FloatDist) { + d = &FloatDist{} + initFloatDist(d, key) + return d +} + +// Insert adds a value to the distribution, updating appropriate values. 
+func (d *FloatDist) Insert(val float64) { + if d.Count != 0 { + if val < d.Low { + d.Low = val + } + if val > d.High { + d.High = val + } + } else { + d.Low = val + d.High = val + } + d.Recent = val + d.Sum += val + + index := d.Count + d.Count += 1 + + if index < ReservoirSize { + d.reservoir[index] = float32(val) + d.sorted = false + } else { + window := d.Count + // careful, the capitalization of Window is important + if Window > 0 && window > Window { + window = Window + } + // fast, but kind of biased. probably okay + j := d.rng.Uint64() % uint64(window) + if j < ReservoirSize { + d.reservoir[int(j)] = float32(val) + d.sorted = false + } + } +} + +// FullAverage calculates and returns the average of all inserted values. +func (d *FloatDist) FullAverage() float64 { + if d.Count > 0 { + return d.Sum / float64(d.Count) + } + return 0 +} + +// ReservoirAverage calculates the average of the current reservoir. +func (d *FloatDist) ReservoirAverage() float64 { + amount := ReservoirSize + if d.Count < int64(amount) { + amount = int(d.Count) + } + if amount <= 0 { + return 0 + } + var sum float32 + for i := 0; i < amount; i++ { + sum += d.reservoir[i] + } + return float64(sum / float32(amount)) +} + +// Query will return the approximate value at the given quantile from the +// reservoir, where 0 <= quantile <= 1. 
+func (d *FloatDist) Query(quantile float64) float64 { + rlen := int(ReservoirSize) + if int64(rlen) > d.Count { + rlen = int(d.Count) + } + + if rlen < 2 { + return float64(d.reservoir[0]) + } + + reservoir := d.reservoir[:rlen] + if !d.sorted { + sort.Sort(float32Slice(reservoir)) + d.sorted = true + } + + if quantile <= 0 { + return float64(reservoir[0]) + } + if quantile >= 1 { + return float64(reservoir[rlen-1]) + } + + idx_float := quantile * float64(rlen-1) + idx := int(idx_float) + + diff := idx_float - float64(idx) + prior := float64(reservoir[idx]) + return float64(prior + diff*(float64(reservoir[idx+1])-prior)) +} + +// Copy returns a full copy of the entire distribution. +func (d *FloatDist) Copy() *FloatDist { + cp := *d + cp.rng = newXORShift128() + return &cp +} + +func (d *FloatDist) Reset() { + d.Low, d.High, d.Recent, d.Count, d.Sum = 0, 0, 0, 0, 0 + // resetting count will reset the quantile reservoir +} + +func (d *FloatDist) Stats(cb func(key SeriesKey, field string, val float64)) { + count := d.Count + cb(d.key, "count", float64(count)) + if count > 0 { + cb(d.key, "sum", d.toFloat64(d.Sum)) + cb(d.key, "min", d.toFloat64(d.Low)) + cb(d.key, "avg", d.toFloat64(d.FullAverage())) + cb(d.key, "max", d.toFloat64(d.High)) + cb(d.key, "rmin", d.toFloat64(d.Query(0))) + cb(d.key, "ravg", d.toFloat64(d.ReservoirAverage())) + cb(d.key, "r10", d.toFloat64(d.Query(.1))) + cb(d.key, "r50", d.toFloat64(d.Query(.5))) + cb(d.key, "r90", d.toFloat64(d.Query(.9))) + cb(d.key, "rmax", d.toFloat64(d.Query(1))) + cb(d.key, "recent", d.toFloat64(d.Recent)) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/func.go b/vendor/github.com/spacemonkeygo/monkit/v3/func.go new file mode 100644 index 000000000..bf791ff04 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/func.go @@ -0,0 +1,71 @@ +// Copyright (C) 2015 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "fmt" +) + +// Func represents a FuncStats bound to a particular function id, scope, and +// name. You should create a Func using the Func creation methods +// (Func/FuncNamed) on a Scope. If you want to manage installation bookkeeping +// yourself, create a FuncStats directly. Expected Func creation like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// f := mon.Func() +// ... +// } +// +type Func struct { + // sync/atomic things + FuncStats + + // constructor things + id int64 + scope *Scope + key SeriesKey +} + +func newFunc(s *Scope, key SeriesKey) (f *Func) { + f = &Func{ + id: NewId(), + scope: s, + key: key, + } + initFuncStats(&f.FuncStats, key) + return f +} + +// ShortName returns the name of the function within the package +func (f *Func) ShortName() string { return f.key.Tags.Get("name") } + +// FullName returns the name of the function including the package +func (f *Func) FullName() string { + return fmt.Sprintf("%s.%s", f.scope.name, f.key.Tags.Get("name")) +} + +// Id returns a unique integer referencing this function +func (f *Func) Id() int64 { return f.id } + +// Scope references the Scope this Func is bound to +func (f *Func) Scope() *Scope { return f.scope } + +// Parents will call the given cb with all of the unique Funcs that so far +// have called this Func. 
+func (f *Func) Parents(cb func(f *Func)) { + f.FuncStats.parents(cb) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/funcset.go b/vendor/github.com/spacemonkeygo/monkit/v3/funcset.go new file mode 100644 index 000000000..74ad06b41 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/funcset.go @@ -0,0 +1,78 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sync" +) + +// funcSet is a set data structure (keeps track of unique functions). funcSet +// has a fast path for dealing with cases where the set only has one element. +// +// to reduce memory usage for functions, funcSet exposes its mutex for use in +// other contexts +type funcSet struct { + // sync/atomic things + first *Func + + // protected by mtx + sync.Mutex + rest map[*Func]struct{} +} + +var ( + // used to signify that we've specifically added a nil function, since nil is + // used internally to specify an empty set. + nilFunc = &Func{} +) + +func (s *funcSet) Add(f *Func) { + if f == nil { + f = nilFunc + } + if loadFunc(&s.first) == f { + return + } + if compareAndSwapFunc(&s.first, nil, f) { + return + } + s.Mutex.Lock() + if s.rest == nil { + s.rest = map[*Func]struct{}{} + } + s.rest[f] = struct{}{} + s.Mutex.Unlock() +} + +// Iterate loops over all unique elements of the set. 
+func (s *funcSet) Iterate(cb func(f *Func)) { + s.Mutex.Lock() + uniq := make(map[*Func]struct{}, len(s.rest)+1) + for f := range s.rest { + uniq[f] = struct{}{} + } + s.Mutex.Unlock() + f := loadFunc(&s.first) + if f != nil { + uniq[f] = struct{}{} + } + for f := range uniq { + if f == nilFunc { + cb(nil) + } else { + cb(f) + } + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/funcstats.go b/vendor/github.com/spacemonkeygo/monkit/v3/funcstats.go new file mode 100644 index 000000000..d95a3b939 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/funcstats.go @@ -0,0 +1,219 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sync/atomic" + "time" + + "github.com/spacemonkeygo/monkit/v3/monotime" +) + +// FuncStats keeps track of statistics about a possible function's execution. +// Should be created with NewFuncStats, though expected creation is through a +// Func object: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// f := mon.Func() +// ... 
+// } +// +type FuncStats struct { + // sync/atomic things + current int64 + highwater int64 + parentsAndMutex funcSet + + // mutex things (reuses mutex from parents) + errors map[string]int64 + panics int64 + successTimes DurationDist + failureTimes DurationDist + key SeriesKey +} + +func initFuncStats(f *FuncStats, key SeriesKey) { + f.key = key + f.errors = map[string]int64{} + + key.Measurement += "_times" + initDurationDist(&f.successTimes, key.WithTag("kind", "success")) + initDurationDist(&f.failureTimes, key.WithTag("kind", "failure")) +} + +// NewFuncStats creates a FuncStats +func NewFuncStats(key SeriesKey) (f *FuncStats) { + f = &FuncStats{} + initFuncStats(f, key) + return f +} + +// Reset resets all recorded data. +func (f *FuncStats) Reset() { + atomic.StoreInt64(&f.current, 0) + atomic.StoreInt64(&f.highwater, 0) + f.parentsAndMutex.Lock() + f.errors = make(map[string]int64, len(f.errors)) + f.panics = 0 + f.successTimes.Reset() + f.failureTimes.Reset() + f.parentsAndMutex.Unlock() +} + +func (f *FuncStats) start(parent *Func) { + f.parentsAndMutex.Add(parent) + current := atomic.AddInt64(&f.current, 1) + for { + highwater := atomic.LoadInt64(&f.highwater) + if current <= highwater || + atomic.CompareAndSwapInt64(&f.highwater, highwater, current) { + break + } + } +} + +func (f *FuncStats) end(err error, panicked bool, duration time.Duration) { + atomic.AddInt64(&f.current, -1) + f.parentsAndMutex.Lock() + if panicked { + f.panics += 1 + f.failureTimes.Insert(duration) + f.parentsAndMutex.Unlock() + return + } + if err == nil { + f.successTimes.Insert(duration) + f.parentsAndMutex.Unlock() + return + } + f.failureTimes.Insert(duration) + f.errors[getErrorName(err)] += 1 + f.parentsAndMutex.Unlock() +} + +// Current returns how many concurrent instances of this function are currently +// being observed. +func (f *FuncStats) Current() int64 { return atomic.LoadInt64(&f.current) } + +// Highwater returns the highest value Current() would ever return. 
+func (f *FuncStats) Highwater() int64 { return atomic.LoadInt64(&f.highwater) } + +// Success returns the number of successes that have been observed +func (f *FuncStats) Success() (rv int64) { + f.parentsAndMutex.Lock() + rv = f.successTimes.Count + f.parentsAndMutex.Unlock() + return rv +} + +// Panics returns the number of panics that have been observed +func (f *FuncStats) Panics() (rv int64) { + f.parentsAndMutex.Lock() + rv = f.panics + f.parentsAndMutex.Unlock() + return rv +} + +// Errors returns the number of errors observed by error type. The error type +// is determined by handlers from AddErrorNameHandler, or a default that works +// with most error types. +func (f *FuncStats) Errors() (rv map[string]int64) { + f.parentsAndMutex.Lock() + rv = make(map[string]int64, len(f.errors)) + for errname, count := range f.errors { + rv[errname] = count + } + f.parentsAndMutex.Unlock() + return rv +} + +func (f *FuncStats) parents(cb func(f *Func)) { + f.parentsAndMutex.Iterate(cb) +} + +// Stats implements the StatSource interface +func (f *FuncStats) Stats(cb func(key SeriesKey, field string, val float64)) { + cb(f.key, "current", float64(f.Current())) + cb(f.key, "highwater", float64(f.Highwater())) + + f.parentsAndMutex.Lock() + panics := f.panics + errs := make(map[string]int64, len(f.errors)) + for errname, count := range f.errors { + errs[errname] = count + } + st := f.successTimes.Copy() + ft := f.failureTimes.Copy() + f.parentsAndMutex.Unlock() + + cb(f.key, "successes", float64(st.Count)) + e_count := int64(0) + for errname, count := range errs { + e_count += count + cb(f.key.WithTag("error_name", errname), "count", float64(count)) + } + cb(f.key, "errors", float64(e_count)) + cb(f.key, "panics", float64(panics)) + cb(f.key, "failures", float64(e_count+panics)) + cb(f.key, "total", float64(st.Count+e_count+panics)) + + st.Stats(cb) + ft.Stats(cb) +} + +// SuccessTimes returns a DurationDist of successes +func (f *FuncStats) SuccessTimes() *DurationDist { 
+ f.parentsAndMutex.Lock() + d := f.successTimes.Copy() + f.parentsAndMutex.Unlock() + return d +} + +// FailureTimes returns a DurationDist of failures (includes panics and errors) +func (f *FuncStats) FailureTimes() *DurationDist { + f.parentsAndMutex.Lock() + d := f.failureTimes.Copy() + f.parentsAndMutex.Unlock() + return d +} + +// Observe starts the stopwatch for observing this function and returns a +// function to be called at the end of the function execution. Expected usage +// like: +// +// func MyFunc() (err error) { +// defer funcStats.Observe()(&err) +// ... +// } +// +func (f *FuncStats) Observe() func(errptr *error) { + f.start(nil) + start := monotime.Now() + return func(errptr *error) { + rec := recover() + panicked := rec != nil + finish := monotime.Now() + var err error + if errptr != nil { + err = *errptr + } + f.end(err, panicked, finish.Sub(start)) + if panicked { + panic(rec) + } + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/go.mod b/vendor/github.com/spacemonkeygo/monkit/v3/go.mod new file mode 100644 index 000000000..bb7dfb2d1 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/go.mod @@ -0,0 +1,5 @@ +module github.com/spacemonkeygo/monkit/v3 + +require golang.org/x/net v0.0.0-20190923162816-aa69164e4478 + +go 1.13 diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/go.sum b/vendor/github.com/spacemonkeygo/monkit/v3/go.sum new file mode 100644 index 000000000..dfd6002bd --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/go.sum @@ -0,0 +1,5 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/id.go b/vendor/github.com/spacemonkeygo/monkit/v3/id.go new file mode 100644 index 000000000..bd12d3dcc --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/id.go @@ -0,0 +1,48 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync/atomic" + + "github.com/spacemonkeygo/monkit/v3/monotime" +) + +var ( + idCounter uint64 + inc uint64 +) + +func init() { + var buf [16]byte + if _, err := crand.Read(buf[:]); err == nil { + idCounter = binary.BigEndian.Uint64(buf[0:8]) >> 1 + inc = binary.BigEndian.Uint64(buf[0:8])>>1 | 3 + } else { + rng := rand.New(rand.NewSource(monotime.Now().UnixNano())) + idCounter = uint64(rng.Int63()) + inc = uint64(rng.Int63() | 3) + } +} + +// NewId returns a random integer intended for use when constructing new +// traces. See NewTrace. +func NewId() int64 { + id := atomic.AddUint64(&idCounter, inc) + return int64(id >> 1) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/intdist.go b/vendor/github.com/spacemonkeygo/monkit/v3/intdist.go new file mode 100644 index 000000000..104392a4c --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/intdist.go @@ -0,0 +1,184 @@ +// Copyright (C) 2016 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// WARNING: THE NON-M4 VERSIONS OF THIS FILE ARE GENERATED BY GO GENERATE! +// ONLY MAKE CHANGES TO THE M4 FILE +// + +package monkit + +import ( + "sort" +) + +// IntDist keeps statistics about values such as +// low/high/recent/average/quantiles. Not threadsafe. Construct with +// NewIntDist(). Fields are expected to be read from but not written to. +type IntDist struct { + // Low and High are the lowest and highest values observed since + // construction or the last reset. + Low, High int64 + + // Recent is the last observed value. + Recent int64 + + // Count is the number of observed values since construction or the last + // reset. + Count int64 + + // Sum is the sum of all the observed values since construction or the last + // reset. + Sum int64 + + key SeriesKey + reservoir [ReservoirSize]float32 + rng xorshift128 + sorted bool +} + +func initIntDist(v *IntDist, key SeriesKey) { + v.key = key + v.rng = newXORShift128() +} + +// NewIntDist creates a distribution of int64s. +func NewIntDist(key SeriesKey) (d *IntDist) { + d = &IntDist{} + initIntDist(d, key) + return d +} + +// Insert adds a value to the distribution, updating appropriate values. 
+func (d *IntDist) Insert(val int64) { + if d.Count != 0 { + if val < d.Low { + d.Low = val + } + if val > d.High { + d.High = val + } + } else { + d.Low = val + d.High = val + } + d.Recent = val + d.Sum += val + + index := d.Count + d.Count += 1 + + if index < ReservoirSize { + d.reservoir[index] = float32(val) + d.sorted = false + } else { + window := d.Count + // careful, the capitalization of Window is important + if Window > 0 && window > Window { + window = Window + } + // fast, but kind of biased. probably okay + j := d.rng.Uint64() % uint64(window) + if j < ReservoirSize { + d.reservoir[int(j)] = float32(val) + d.sorted = false + } + } +} + +// FullAverage calculates and returns the average of all inserted values. +func (d *IntDist) FullAverage() int64 { + if d.Count > 0 { + return d.Sum / int64(d.Count) + } + return 0 +} + +// ReservoirAverage calculates the average of the current reservoir. +func (d *IntDist) ReservoirAverage() int64 { + amount := ReservoirSize + if d.Count < int64(amount) { + amount = int(d.Count) + } + if amount <= 0 { + return 0 + } + var sum float32 + for i := 0; i < amount; i++ { + sum += d.reservoir[i] + } + return int64(sum / float32(amount)) +} + +// Query will return the approximate value at the given quantile from the +// reservoir, where 0 <= quantile <= 1. +func (d *IntDist) Query(quantile float64) int64 { + rlen := int(ReservoirSize) + if int64(rlen) > d.Count { + rlen = int(d.Count) + } + + if rlen < 2 { + return int64(d.reservoir[0]) + } + + reservoir := d.reservoir[:rlen] + if !d.sorted { + sort.Sort(float32Slice(reservoir)) + d.sorted = true + } + + if quantile <= 0 { + return int64(reservoir[0]) + } + if quantile >= 1 { + return int64(reservoir[rlen-1]) + } + + idx_float := quantile * float64(rlen-1) + idx := int(idx_float) + + diff := idx_float - float64(idx) + prior := float64(reservoir[idx]) + return int64(prior + diff*(float64(reservoir[idx+1])-prior)) +} + +// Copy returns a full copy of the entire distribution. 
+func (d *IntDist) Copy() *IntDist { + cp := *d + cp.rng = newXORShift128() + return &cp +} + +func (d *IntDist) Reset() { + d.Low, d.High, d.Recent, d.Count, d.Sum = 0, 0, 0, 0, 0 + // resetting count will reset the quantile reservoir +} + +func (d *IntDist) Stats(cb func(key SeriesKey, field string, val float64)) { + count := d.Count + cb(d.key, "count", float64(count)) + if count > 0 { + cb(d.key, "sum", d.toFloat64(d.Sum)) + cb(d.key, "min", d.toFloat64(d.Low)) + cb(d.key, "avg", d.toFloat64(d.FullAverage())) + cb(d.key, "max", d.toFloat64(d.High)) + cb(d.key, "rmin", d.toFloat64(d.Query(0))) + cb(d.key, "ravg", d.toFloat64(d.ReservoirAverage())) + cb(d.key, "r10", d.toFloat64(d.Query(.1))) + cb(d.key, "r50", d.toFloat64(d.Query(.5))) + cb(d.key, "r90", d.toFloat64(d.Query(.9))) + cb(d.key, "rmax", d.toFloat64(d.Query(1))) + cb(d.key, "recent", d.toFloat64(d.Recent)) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/meter.go b/vendor/github.com/spacemonkeygo/monkit/v3/meter.go new file mode 100644 index 000000000..be0eecb74 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/meter.go @@ -0,0 +1,212 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package monkit + +import ( + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3/monotime" +) + +const ( + ticksToKeep = 24 + timePerTick = 10 * time.Minute +) + +var ( + defaultTicker = ticker{} +) + +type meterBucket struct { + count int64 + start time.Time +} + +// Meter keeps track of events and their rates over time. +// Implements the StatSource interface. You should construct using NewMeter, +// though expected usage is like: +// +// var ( +// mon = monkit.Package() +// meter = mon.Meter("meter") +// ) +// +// func MyFunc() { +// ... +// meter.Mark(4) // 4 things happened +// ... +// } +// +type Meter struct { + mtx sync.Mutex + total int64 + slices [ticksToKeep]meterBucket + key SeriesKey +} + +// NewMeter constructs a Meter +func NewMeter(key SeriesKey) *Meter { + rv := &Meter{key: key} + now := monotime.Now() + for i := 0; i < ticksToKeep; i++ { + rv.slices[i].start = now + } + defaultTicker.register(rv) + return rv +} + +// Reset resets all internal state. +// +// Useful when monitoring a counter that has overflowed. +func (e *Meter) Reset(new_total int64) { + e.mtx.Lock() + e.total = new_total + now := monotime.Now() + for _, slice := range e.slices { + slice.count = 0 + slice.start = now + } + e.mtx.Unlock() +} + +// SetTotal sets the initial total count of the meter. +func (e *Meter) SetTotal(total int64) { + e.mtx.Lock() + e.total = total + e.mtx.Unlock() +} + +// Mark marks amount events occurring in the current time window. +func (e *Meter) Mark(amount int) { + e.mtx.Lock() + e.slices[ticksToKeep-1].count += int64(amount) + e.mtx.Unlock() +} + +// Mark64 marks amount events occurring in the current time window (int64 version). +func (e *Meter) Mark64(amount int64) { + e.mtx.Lock() + e.slices[ticksToKeep-1].count += amount + e.mtx.Unlock() +} + +func (e *Meter) tick(now time.Time) { + e.mtx.Lock() + // only advance meter buckets if something happened. otherwise + // rare events will always just have zero rates. 
+ if e.slices[ticksToKeep-1].count != 0 { + e.total += e.slices[0].count + copy(e.slices[:], e.slices[1:]) + e.slices[ticksToKeep-1] = meterBucket{count: 0, start: now} + } + e.mtx.Unlock() +} + +func (e *Meter) stats(now time.Time) (rate float64, total int64) { + current := int64(0) + e.mtx.Lock() + start := e.slices[0].start + for i := 0; i < ticksToKeep; i++ { + current += e.slices[i].count + } + total = e.total + e.mtx.Unlock() + total += current + duration := now.Sub(start).Seconds() + if duration > 0 { + rate = float64(current) / duration + } else { + rate = 0 + } + return rate, total +} + +// Rate returns the rate over the internal sliding window +func (e *Meter) Rate() float64 { + rate, _ := e.stats(monotime.Now()) + return rate +} + +// Total returns the total over the internal sliding window +func (e *Meter) Total() float64 { + _, total := e.stats(monotime.Now()) + return float64(total) +} + +// Stats implements the StatSource interface +func (e *Meter) Stats(cb func(key SeriesKey, field string, val float64)) { + rate, total := e.stats(monotime.Now()) + cb(e.key, "rate", rate) + cb(e.key, "total", float64(total)) +} + +// DiffMeter is a StatSource that shows the difference between +// the rates of two meters. Expected usage like: +// +// var ( +// mon = monkit.Package() +// herps = mon.Meter("herps") +// derps = mon.Meter("derps") +// herpToDerp = mon.DiffMeter("herp_to_derp", herps, derps) +// ) +// +type DiffMeter struct { + meter1, meter2 *Meter + key SeriesKey +} + +// Constructs a DiffMeter. 
+func NewDiffMeter(key SeriesKey, meter1, meter2 *Meter) *DiffMeter { + return &DiffMeter{key: key, meter1: meter1, meter2: meter2} +} + +// Stats implements the StatSource interface +func (m *DiffMeter) Stats(cb func(key SeriesKey, field string, val float64)) { + now := monotime.Now() + rate1, total1 := m.meter1.stats(now) + rate2, total2 := m.meter2.stats(now) + cb(m.key, "rate", rate1-rate2) + cb(m.key, "total", float64(total1-total2)) +} + +type ticker struct { + mtx sync.Mutex + started bool + meters []*Meter +} + +func (t *ticker) register(m *Meter) { + t.mtx.Lock() + if !t.started { + t.started = true + go t.run() + } + t.meters = append(t.meters, m) + t.mtx.Unlock() +} + +func (t *ticker) run() { + for { + time.Sleep(timePerTick) + t.mtx.Lock() + meters := t.meters // this is safe since we only use append + t.mtx.Unlock() + now := monotime.Now() + for _, m := range meters { + m.tick(now) + } + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime.go b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime.go new file mode 100644 index 000000000..6258eed38 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime.go @@ -0,0 +1,7 @@ +package monotime + +import "time" + +var initTime = time.Now() + +func Now() time.Time { return initTime.Add(elapsed()) } diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_fallback.go b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_fallback.go new file mode 100644 index 000000000..f3724a50a --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_fallback.go @@ -0,0 +1,7 @@ +// +build !windows + +package monotime + +import "time" + +func elapsed() time.Duration { return time.Since(initTime) } diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_windows.go b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_windows.go new file mode 100644 index 000000000..6154d2f01 --- /dev/null +++ 
b/vendor/github.com/spacemonkeygo/monkit/v3/monotime/monotime_windows.go @@ -0,0 +1,27 @@ +package monotime + +import ( + "syscall" + "time" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + queryPerformanceFrequencyProc = modkernel32.NewProc("QueryPerformanceFrequency") + queryPerformanceCounterProc = modkernel32.NewProc("QueryPerformanceCounter") + + qpcFrequency = queryPerformanceFrequency() +) + +func elapsed() time.Duration { + var elapsed int64 + syscall.Syscall(queryPerformanceCounterProc.Addr(), 1, uintptr(unsafe.Pointer(&elapsed)), 0, 0) + return time.Duration(elapsed) * time.Second / (time.Duration(qpcFrequency) * time.Nanosecond) +} + +func queryPerformanceFrequency() int64 { + var freq int64 + syscall.Syscall(queryPerformanceFrequencyProc.Addr(), 1, uintptr(unsafe.Pointer(&freq)), 0, 0) + return freq +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/registry.go b/vendor/github.com/spacemonkeygo/monkit/v3/registry.go new file mode 100644 index 000000000..4b2b82a18 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/registry.go @@ -0,0 +1,256 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sort" + "sync" +) + +type traceWatcherRef struct { + watcher func(*Trace) +} + +// Registry encapsulates all of the top-level state for a monitoring system. +// In general, only the Default registry is ever used. 
+type Registry struct { + // sync/atomic things + traceWatcher *traceWatcherRef + + watcherMtx sync.Mutex + watcherCounter int64 + traceWatchers map[int64]func(*Trace) + + scopeMtx sync.Mutex + scopes map[string]*Scope + + spanMtx sync.Mutex + spans map[*Span]struct{} + + orphanMtx sync.Mutex + orphans map[*Span]struct{} +} + +// NewRegistry creates a NewRegistry, though you almost certainly just want +// to use Default. +func NewRegistry() *Registry { + return &Registry{ + traceWatchers: map[int64]func(*Trace){}, + scopes: map[string]*Scope{}, + spans: map[*Span]struct{}{}, + orphans: map[*Span]struct{}{}} +} + +// Package creates a new monitoring Scope, named after the top level package. +// It's expected that you'll have something like +// +// var mon = monkit.Package() +// +// at the top of each package. +func (r *Registry) Package() *Scope { + return r.ScopeNamed(callerPackage(1)) +} + +// ScopeNamed is like Package, but lets you choose the name. +func (r *Registry) ScopeNamed(name string) *Scope { + r.scopeMtx.Lock() + defer r.scopeMtx.Unlock() + s, exists := r.scopes[name] + if exists { + return s + } + s = newScope(r, name) + r.scopes[name] = s + return s +} + +func (r *Registry) observeTrace(t *Trace) { + watcher := loadTraceWatcherRef(&r.traceWatcher) + if watcher != nil { + watcher.watcher(t) + } +} + +func (r *Registry) updateWatcher() { + cbs := make([]func(*Trace), 0, len(r.traceWatchers)) + for _, cb := range r.traceWatchers { + cbs = append(cbs, cb) + } + switch len(cbs) { + case 0: + storeTraceWatcherRef(&r.traceWatcher, nil) + case 1: + storeTraceWatcherRef(&r.traceWatcher, + &traceWatcherRef{watcher: cbs[0]}) + default: + storeTraceWatcherRef(&r.traceWatcher, + &traceWatcherRef{watcher: func(t *Trace) { + for _, cb := range cbs { + cb(t) + } + }}) + } +} + +// ObserveTraces lets you observe all traces flowing through the system. 
+// The passed in callback 'cb' will be called for every new trace as soon as +// it starts, until the returned cancel method is called. +// Note: this only applies to all new traces. If you want to find existing +// or running traces, please pull them off of live RootSpans. +func (r *Registry) ObserveTraces(cb func(*Trace)) (cancel func()) { + // even though observeTrace doesn't get a mutex, it's only ever loading + // the traceWatcher pointer, so we can use this mutex here to safely + // coordinate the setting of the traceWatcher pointer. + r.watcherMtx.Lock() + defer r.watcherMtx.Unlock() + + cbId := r.watcherCounter + r.watcherCounter += 1 + r.traceWatchers[cbId] = cb + r.updateWatcher() + + return func() { + r.watcherMtx.Lock() + defer r.watcherMtx.Unlock() + delete(r.traceWatchers, cbId) + r.updateWatcher() + } +} + +func (r *Registry) rootSpanStart(s *Span) { + r.spanMtx.Lock() + r.spans[s] = struct{}{} + r.spanMtx.Unlock() +} + +func (r *Registry) rootSpanEnd(s *Span) { + r.spanMtx.Lock() + delete(r.spans, s) + r.spanMtx.Unlock() +} + +func (r *Registry) orphanedSpan(s *Span) { + r.orphanMtx.Lock() + r.orphans[s] = struct{}{} + r.orphanMtx.Unlock() +} + +func (r *Registry) orphanEnd(s *Span) { + r.orphanMtx.Lock() + delete(r.orphans, s) + r.orphanMtx.Unlock() +} + +// RootSpans will call 'cb' on all currently executing Spans with no live or +// reachable parent. See also AllSpans. +func (r *Registry) RootSpans(cb func(s *Span)) { + r.spanMtx.Lock() + spans := make([]*Span, 0, len(r.spans)) + for s := range r.spans { + spans = append(spans, s) + } + r.spanMtx.Unlock() + r.orphanMtx.Lock() + orphans := make([]*Span, 0, len(r.orphans)) + for s := range r.orphans { + orphans = append(orphans, s) + } + r.orphanMtx.Unlock() + spans = append(spans, orphans...) 
+ sort.Sort(spanSorter(spans)) + for _, s := range spans { + cb(s) + } +} + +func walkSpan(s *Span, cb func(s *Span)) { + cb(s) + s.Children(func(s *Span) { + walkSpan(s, cb) + }) +} + +// AllSpans calls 'cb' on all currently known Spans. See also RootSpans. +func (r *Registry) AllSpans(cb func(s *Span)) { + r.RootSpans(func(s *Span) { walkSpan(s, cb) }) +} + +// Scopes calls 'cb' on all currently known Scopes. +func (r *Registry) Scopes(cb func(s *Scope)) { + r.scopeMtx.Lock() + c := make([]*Scope, 0, len(r.scopes)) + for _, s := range r.scopes { + c = append(c, s) + } + r.scopeMtx.Unlock() + sort.Sort(scopeSorter(c)) + for _, s := range c { + cb(s) + } +} + +// Funcs calls 'cb' on all currently known Funcs. +func (r *Registry) Funcs(cb func(f *Func)) { + r.Scopes(func(s *Scope) { s.Funcs(cb) }) +} + +// Stats implements the StatSource interface. +func (r *Registry) Stats(cb func(key SeriesKey, field string, val float64)) { + r.Scopes(func(s *Scope) { + s.Stats(func(key SeriesKey, field string, val float64) { + cb(key.WithTag("scope", s.name), field, val) + }) + }) +} + +var _ StatSource = (*Registry)(nil) + +// Default is the default Registry +var Default = NewRegistry() + +// ScopeNamed is just a wrapper around Default.ScopeNamed +func ScopeNamed(name string) *Scope { return Default.ScopeNamed(name) } + +// RootSpans is just a wrapper around Default.RootSpans +func RootSpans(cb func(s *Span)) { Default.RootSpans(cb) } + +// Scopes is just a wrapper around Default.Scopes +func Scopes(cb func(s *Scope)) { Default.Scopes(cb) } + +// Funcs is just a wrapper around Default.Funcs +func Funcs(cb func(f *Func)) { Default.Funcs(cb) } + +// Package is just a wrapper around Default.Package +func Package() *Scope { return Default.ScopeNamed(callerPackage(1)) } + +// Stats is just a wrapper around Default.Stats +func Stats(cb func(key SeriesKey, field string, val float64)) { Default.Stats(cb) } + +type spanSorter []*Span + +func (s spanSorter) Len() int { return len(s) } 
+func (s spanSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s spanSorter) Less(i, j int) bool { + ispan, jspan := s[i], s[j] + iname, jname := ispan.f.FullName(), jspan.f.FullName() + return (iname < jname) || (iname == jname && ispan.id < jspan.id) +} + +type scopeSorter []*Scope + +func (s scopeSorter) Len() int { return len(s) } +func (s scopeSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s scopeSorter) Less(i, j int) bool { return s[i].name < s[j].name } diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/rng.go b/vendor/github.com/spacemonkeygo/monkit/v3/rng.go new file mode 100644 index 000000000..3770bbeee --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/rng.go @@ -0,0 +1,181 @@ +// Copyright (C) 2016 Space Monkey, Inc. + +package monkit + +import ( + "math/rand" +) + +// lcg is a simple linear congruential generator based on Knuths MMIX. +type lcg uint64 + +// Make sure lcg is a rand.Source +var _ rand.Source = (*lcg)(nil) + +func newLCG() lcg { return lcg(rand.Int63()) } + +// See Knuth. +const ( + a = 6364136223846793005 + c = 1442695040888963407 + h = 0xffffffff00000000 +) + +// Uint64 returns a uint64. +func (l *lcg) Uint64() (ret uint64) { + *l = a**l + c + ret |= uint64(*l) >> 32 + *l = a**l + c + ret |= uint64(*l) & h + return +} + +// Int63 returns a positive 63 bit integer in an int64 +func (l *lcg) Int63() int64 { + return int64(l.Uint64() >> 1) +} + +// Seed sets the state of the lcg. +func (l *lcg) Seed(seed int64) { + *l = lcg(seed) +} + +// +// xorshift family of generators from https://en.wikipedia.org/wiki/Xorshift +// +// xorshift64 is the xorshift64* generator +// xorshift1024 is the xorshift1024* generator +// xorshift128 is the xorshift128+ generator +// + +type xorshift64 uint64 + +var _ rand.Source = (*xorshift64)(nil) + +func newXORShift64() xorshift64 { return xorshift64(rand.Int63()) } + +// Uint64 returns a uint64. 
+func (s *xorshift64) Uint64() (ret uint64) { + x := uint64(*s) + x ^= x >> 12 // a + x ^= x << 25 // b + x ^= x >> 27 // c + x *= 2685821657736338717 + *s = xorshift64(x) + return x +} + +// Int63 returns a positive 63 bit integer in an int64 +func (s *xorshift64) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +// Seed sets the state of the lcg. +func (s *xorshift64) Seed(seed int64) { + *s = xorshift64(seed) +} + +type xorshift1024 struct { + s [16]uint64 + p int +} + +var _ rand.Source = (*xorshift1024)(nil) + +func newXORShift1024() xorshift1024 { + var x xorshift1024 + x.Seed(rand.Int63()) + return x +} + +// Seed sets the state of the lcg. +func (s *xorshift1024) Seed(seed int64) { + rng := xorshift64(seed) + *s = xorshift1024{ + s: [16]uint64{ + rng.Uint64(), rng.Uint64(), rng.Uint64(), rng.Uint64(), + rng.Uint64(), rng.Uint64(), rng.Uint64(), rng.Uint64(), + rng.Uint64(), rng.Uint64(), rng.Uint64(), rng.Uint64(), + rng.Uint64(), rng.Uint64(), rng.Uint64(), rng.Uint64(), + }, + p: 0, + } +} + +// Int63 returns a positive 63 bit integer in an int64 +func (s *xorshift1024) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +// Uint64 returns a uint64. +func (s *xorshift1024) Uint64() (ret uint64) { + // factoring this out proves to SSA backend that the array checks below + // do not need bounds checks + p := s.p & 15 + s0 := s.s[p] + p = (p + 1) & 15 + s.p = p + s1 := s.s[p] + s1 ^= s1 << 31 + s.s[p] = s1 ^ s0 ^ (s1 >> 1) ^ (s0 >> 30) + return s.s[p] * 1181783497276652981 +} + +// Jump is used to advance the state 2^512 iterations. 
+func (s *xorshift1024) Jump() { + var t [16]uint64 + for i := 0; i < 16; i++ { + for b := uint(0); b < 64; b++ { + if (xorshift1024jump[i] & (1 << b)) > 0 { + for j := 0; j < 16; j++ { + t[j] ^= s.s[(j+s.p)&15] + } + } + _ = s.Uint64() + } + } + for j := 0; j < 16; j++ { + s.s[(j+s.p)&15] = t[j] + } +} + +var xorshift1024jump = [16]uint64{ + 0x84242f96eca9c41d, 0xa3c65b8776f96855, 0x5b34a39f070b5837, + 0x4489affce4f31a1e, 0x2ffeeb0a48316f40, 0xdc2d9891fe68c022, + 0x3659132bb12fea70, 0xaac17d8efa43cab8, 0xc4cb815590989b13, + 0x5ee975283d71c93b, 0x691548c86c1bd540, 0x7910c41d10a1e6a5, + 0x0b5fc64563b3e2a8, 0x047f7684e9fc949d, 0xb99181f2d8f685ca, + 0x284600e3f30e38c3, +} + +type xorshift128 [2]uint64 + +var _ rand.Source = (*xorshift128)(nil) + +func newXORShift128() xorshift128 { + var s xorshift128 + s.Seed(rand.Int63()) + return s +} + +func (s *xorshift128) Seed(seed int64) { + rng := xorshift64(seed) + *s = xorshift128{ + rng.Uint64(), rng.Uint64(), + } +} + +// Int63 returns a positive 63 bit integer in an int64 +func (s *xorshift128) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +// Uint64 returns a uint64. +func (s *xorshift128) Uint64() (ret uint64) { + x := s[0] + y := s[1] + s[0] = y + x ^= x << 23 + s[1] = x ^ y ^ (x >> 17) ^ (y >> 26) + return s[1] + y +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/scope.go b/vendor/github.com/spacemonkeygo/monkit/v3/scope.go new file mode 100644 index 000000000..086ef0900 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/scope.go @@ -0,0 +1,301 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "fmt" + "strings" + "sync" +) + +// Scope represents a named collection of StatSources. Scopes are constructed +// through Registries. +type Scope struct { + r *Registry + name string + mtx sync.RWMutex + sources map[string]StatSource + chains []StatSource +} + +func newScope(r *Registry, name string) *Scope { + return &Scope{ + r: r, + name: name, + sources: map[string]StatSource{}} +} + +// Func retrieves or creates a Func named after the currently executing +// function name (via runtime.Caller. See FuncNamed to choose your own name. +func (s *Scope) Func() *Func { + return s.FuncNamed(callerFunc(0)) +} + +func (s *Scope) newSource(name string, constructor func() StatSource) ( + rv StatSource) { + + s.mtx.RLock() + source, exists := s.sources[name] + s.mtx.RUnlock() + + if exists { + return source + } + + s.mtx.Lock() + if source, exists := s.sources[name]; exists { + s.mtx.Unlock() + return source + } + + ss := constructor() + s.sources[name] = ss + s.mtx.Unlock() + + return ss +} + +// FuncNamed retrieves or creates a Func named using the given name and +// SeriesTags. See Func() for automatic name determination. +// +// Each unique combination of keys/values in each SeriesTag will result in a +// unique Func. SeriesTags are not sorted, so keep the order consistent to avoid +// unintentionally creating new unique Funcs. 
+func (s *Scope) FuncNamed(name string, tags ...SeriesTag) *Func { + var sourceName strings.Builder + sourceName.WriteString("func:") + sourceName.WriteString(name) + for _, tag := range tags { + sourceName.WriteByte(',') + sourceName.WriteString(tag.Key) + sourceName.WriteByte('=') + sourceName.WriteString(tag.Val) + } + source := s.newSource(sourceName.String(), func() StatSource { + key := NewSeriesKey("function").WithTag("name", name) + for _, tag := range tags { + key = key.WithTag(tag.Key, tag.Val) + } + return newFunc(s, key) + }) + f, ok := source.(*Func) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return f +} + +// Funcs calls 'cb' for all Funcs registered on this Scope. +func (s *Scope) Funcs(cb func(f *Func)) { + s.mtx.Lock() + funcs := make(map[*Func]struct{}, len(s.sources)) + for _, source := range s.sources { + if f, ok := source.(*Func); ok { + funcs[f] = struct{}{} + } + } + s.mtx.Unlock() + for f := range funcs { + cb(f) + } +} + +// Meter retrieves or creates a Meter named after the given name. See Event. +func (s *Scope) Meter(name string) *Meter { + source := s.newSource(name, func() StatSource { return NewMeter(NewSeriesKey(name)) }) + m, ok := source.(*Meter) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// Event retrieves or creates a Meter named after the given name and then +// calls Mark(1) on that meter. +func (s *Scope) Event(name string) { + s.Meter(name).Mark(1) +} + +// DiffMeter retrieves or creates a DiffMeter after the given name and two +// submeters. 
+func (s *Scope) DiffMeter(name string, m1, m2 *Meter) { + source := s.newSource(name, func() StatSource { + return NewDiffMeter(NewSeriesKey(name), m1, m2) + }) + if _, ok := source.(*DiffMeter); !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } +} + +// IntVal retrieves or creates an IntVal after the given name. +func (s *Scope) IntVal(name string) *IntVal { + source := s.newSource(name, func() StatSource { return NewIntVal(NewSeriesKey(name)) }) + m, ok := source.(*IntVal) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// IntValf retrieves or creates an IntVal after the given printf-formatted +// name. +func (s *Scope) IntValf(template string, args ...interface{}) *IntVal { + return s.IntVal(fmt.Sprintf(template, args...)) +} + +// FloatVal retrieves or creates a FloatVal after the given name. +func (s *Scope) FloatVal(name string) *FloatVal { + source := s.newSource(name, func() StatSource { return NewFloatVal(NewSeriesKey(name)) }) + m, ok := source.(*FloatVal) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// FloatValf retrieves or creates a FloatVal after the given printf-formatted +// name. +func (s *Scope) FloatValf(template string, args ...interface{}) *FloatVal { + return s.FloatVal(fmt.Sprintf(template, args...)) +} + +// BoolVal retrieves or creates a BoolVal after the given name. +func (s *Scope) BoolVal(name string) *BoolVal { + source := s.newSource(name, func() StatSource { return NewBoolVal(NewSeriesKey(name)) }) + m, ok := source.(*BoolVal) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// BoolValf retrieves or creates a BoolVal after the given printf-formatted +// name. 
+func (s *Scope) BoolValf(template string, args ...interface{}) *BoolVal { + return s.BoolVal(fmt.Sprintf(template, args...)) +} + +// StructVal retrieves or creates a StructVal after the given name. +func (s *Scope) StructVal(name string) *StructVal { + source := s.newSource(name, func() StatSource { return NewStructVal(NewSeriesKey(name)) }) + m, ok := source.(*StructVal) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// Timer retrieves or creates a Timer after the given name. +func (s *Scope) Timer(name string) *Timer { + source := s.newSource(name, func() StatSource { return NewTimer(NewSeriesKey(name)) }) + m, ok := source.(*Timer) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// Counter retrieves or creates a Counter after the given name. +func (s *Scope) Counter(name string) *Counter { + source := s.newSource(name, func() StatSource { return NewCounter(NewSeriesKey(name)) }) + m, ok := source.(*Counter) + if !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + return m +} + +// Gauge registers a callback that returns a float as the given name in the +// Scope's StatSource table. +func (s *Scope) Gauge(name string, cb func() float64) { + type gauge struct{ StatSource } + + // gauges allow overwriting + s.mtx.Lock() + defer s.mtx.Unlock() + + if source, exists := s.sources[name]; exists { + if _, ok := source.(gauge); !ok { + panic(fmt.Sprintf("%s already used for another stats source: %#v", + name, source)) + } + } + + s.sources[name] = gauge{StatSource: StatSourceFunc( + func(scb func(key SeriesKey, field string, value float64)) { + scb(NewSeriesKey(name), "value", cb()) + }), + } +} + +// Chain registers a full StatSource as the given name in the Scope's +// StatSource table. 
+func (s *Scope) Chain(source StatSource) { + // chains allow overwriting + s.mtx.Lock() + defer s.mtx.Unlock() + + s.chains = append(s.chains, source) +} + +func (s *Scope) allNamedSources() (sources []namedSource) { + s.mtx.Lock() + sources = make([]namedSource, 0, len(s.sources)) + for name, source := range s.sources { + sources = append(sources, namedSource{name: name, source: source}) + } + s.mtx.Unlock() + return sources +} + +// Stats implements the StatSource interface. +func (s *Scope) Stats(cb func(key SeriesKey, field string, val float64)) { + for _, namedSource := range s.allNamedSources() { + namedSource.source.Stats(cb) + } + + s.mtx.Lock() + chains := append([]StatSource(nil), s.chains...) + s.mtx.Unlock() + + for _, source := range chains { + source.Stats(cb) + } +} + +// Name returns the name of the Scope, often the Package name. +func (s *Scope) Name() string { return s.name } + +var _ StatSource = (*Scope)(nil) + +type namedSource struct { + name string + source StatSource +} + +type namedSourceList []namedSource + +func (l namedSourceList) Len() int { return len(l) } +func (l namedSourceList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l namedSourceList) Less(i, j int) bool { return l[i].name < l[j].name } diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/span.go b/vendor/github.com/spacemonkeygo/monkit/v3/span.go new file mode 100644 index 000000000..6326c6e9c --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/span.go @@ -0,0 +1,146 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "fmt" + "sort" + "time" +) + +type ctxKey int + +const ( + spanKey ctxKey = iota +) + +// Annotation represents an arbitrary name and value string pair +type Annotation struct { + Name string + Value string +} + +func (s *Span) addChild(child *Span) { + s.mtx.Lock() + s.children.Add(child) + done := s.done + s.mtx.Unlock() + if done { + child.orphan() + } +} + +func (s *Span) removeChild(child *Span) { + s.mtx.Lock() + s.children.Remove(child) + s.mtx.Unlock() +} + +func (s *Span) orphan() { + s.mtx.Lock() + if !s.done && !s.orphaned { + s.orphaned = true + s.f.scope.r.orphanedSpan(s) + } + s.mtx.Unlock() +} + +// Duration returns the current amount of time the Span has been running +func (s *Span) Duration() time.Duration { + return time.Since(s.start) +} + +// Start returns the time the Span started. +func (s *Span) Start() time.Time { + return s.start +} + +// Value implements context.Context +func (s *Span) Value(key interface{}) interface{} { + if key == spanKey { + return s + } + return s.Context.Value(key) +} + +// String implements context.Context +func (s *Span) String() string { + // TODO: for working with Contexts + return fmt.Sprintf("%v.WithSpan()", s.Context) +} + +// Children returns all known running child Spans. 
+func (s *Span) Children(cb func(s *Span)) { + found := map[*Span]bool{} + var sorter []*Span + s.mtx.Lock() + s.children.Iterate(func(s *Span) { + if !found[s] { + found[s] = true + sorter = append(sorter, s) + } + }) + s.mtx.Unlock() + sort.Sort(spanSorter(sorter)) + for _, s := range sorter { + cb(s) + } +} + +// Args returns the list of strings associated with the args given to the +// Task that created this Span. +func (s *Span) Args() (rv []string) { + rv = make([]string, 0, len(s.args)) + for _, arg := range s.args { + rv = append(rv, fmt.Sprintf("%#v", arg)) + } + return rv +} + +// Id returns the Span id. +func (s *Span) Id() int64 { return s.id } + +// Func returns the Func that kicked off this Span. +func (s *Span) Func() *Func { return s.f } + +// Trace returns the Trace this Span is associated with. +func (s *Span) Trace() *Trace { return s.trace } + +// Parent returns the Parent Span. +func (s *Span) Parent() *Span { return s.parent } + +// Annotations returns any added annotations created through the Span Annotate +// method +func (s *Span) Annotations() []Annotation { + s.mtx.Lock() + annotations := s.annotations // okay cause we only ever append to this slice + s.mtx.Unlock() + return append([]Annotation(nil), annotations...) +} + +// Annotate adds an annotation to the existing Span. +func (s *Span) Annotate(name, val string) { + s.mtx.Lock() + s.annotations = append(s.annotations, Annotation{Name: name, Value: val}) + s.mtx.Unlock() +} + +// Orphaned returns true if the Parent span ended before this Span did. +func (s *Span) Orphaned() (rv bool) { + s.mtx.Lock() + rv = s.orphaned + s.mtx.Unlock() + return rv +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/spanbag.go b/vendor/github.com/spacemonkeygo/monkit/v3/spanbag.go new file mode 100644 index 000000000..4e9a6d268 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/spanbag.go @@ -0,0 +1,59 @@ +// Copyright (C) 2015 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +// spanBag is a bag data structure (can add 0 or more references to a span, +// where every add needs to be matched with an equivalent remove). spanBag has +// a fast path for dealing with cases where the bag only has one element (the +// common case). spanBag is not threadsafe +type spanBag struct { + first *Span + rest map[*Span]int32 +} + +func (b *spanBag) Add(s *Span) { + if b.first == nil { + b.first = s + return + } + if b.rest == nil { + b.rest = map[*Span]int32{} + } + b.rest[s] += 1 +} + +func (b *spanBag) Remove(s *Span) { + if b.first == s { + b.first = nil + return + } + // okay it must be in b.rest + count := b.rest[s] + if count <= 1 { + delete(b.rest, s) + } else { + b.rest[s] = count - 1 + } +} + +// Iterate returns all elements +func (b *spanBag) Iterate(cb func(*Span)) { + if b.first != nil { + cb(b.first) + } + for s := range b.rest { + cb(s) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/spinlock.go b/vendor/github.com/spacemonkeygo/monkit/v3/spinlock.go new file mode 100644 index 000000000..5b0d07144 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/spinlock.go @@ -0,0 +1,35 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "runtime" + "sync/atomic" +) + +type spinLock uint32 + +func (s *spinLock) Lock() { + for { + if atomic.CompareAndSwapUint32((*uint32)(s), 0, 1) { + return + } + runtime.Gosched() + } +} + +func (s *spinLock) Unlock() { + atomic.StoreUint32((*uint32)(s), 0) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/stats.go b/vendor/github.com/spacemonkeygo/monkit/v3/stats.go new file mode 100644 index 000000000..128324129 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/stats.go @@ -0,0 +1,77 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "strings" +) + +// SeriesKey represents an individual time series for monkit to output. +type SeriesKey struct { + Measurement string + Tags *TagSet +} + +// NewSeriesKey constructs a new series with the minimal fields. 
+func NewSeriesKey(measurement string) SeriesKey { + return SeriesKey{Measurement: measurement} +} + +// WithTag returns a copy of the SeriesKey with the tag set +func (s SeriesKey) WithTag(key, value string) SeriesKey { + s.Tags = s.Tags.Set(key, value) + return s +} + +// String returns a string representation of the series. For example, it returns +// something like `measurement,tag0=val0,tag1=val1`. +func (s SeriesKey) String() string { + var builder strings.Builder + writeMeasurement(&builder, s.Measurement) + if s.Tags.Len() > 0 { + builder.WriteByte(',') + builder.WriteString(s.Tags.String()) + } + return builder.String() +} + +func (s SeriesKey) WithField(field string) string { + var builder strings.Builder + builder.WriteString(s.String()) + builder.WriteByte(' ') + writeTag(&builder, field) + return builder.String() +} + +// StatSource represents anything that can return named floating point values. +type StatSource interface { + Stats(cb func(key SeriesKey, field string, val float64)) +} + +type StatSourceFunc func(cb func(key SeriesKey, field string, val float64)) + +func (f StatSourceFunc) Stats(cb func(key SeriesKey, field string, val float64)) { + f(cb) +} + +// Collect takes something that implements the StatSource interface and returns +// a key/value map. +func Collect(mon StatSource) map[string]float64 { + rv := make(map[string]float64) + mon.Stats(func(key SeriesKey, field string, val float64) { + rv[key.WithField(field)] = val + }) + return rv +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/struct.go b/vendor/github.com/spacemonkeygo/monkit/v3/struct.go new file mode 100644 index 000000000..7f1babf4d --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/struct.go @@ -0,0 +1,59 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import "reflect" + +var f64Type = reflect.TypeOf(float64(0)) + +type emptyStatSource struct{} + +func (emptyStatSource) Stats(cb func(key SeriesKey, field string, val float64)) {} + +// StatSourceFromStruct uses the reflect package to implement the Stats call +// across all float64-castable fields of the struct. +func StatSourceFromStruct(key SeriesKey, structData interface{}) StatSource { + val := deref(reflect.ValueOf(structData)) + + typ := val.Type() + if typ.Kind() != reflect.Struct { + return emptyStatSource{} + } + + return StatSourceFunc(func(cb func(key SeriesKey, field string, val float64)) { + for i := 0; i < typ.NumField(); i++ { + field := deref(val.Field(i)) + field_type := field.Type() + + if field_type.Kind() == reflect.Struct && field.CanInterface() { + child_source := StatSourceFromStruct(key, field.Interface()) + child_source.Stats(func(key SeriesKey, field string, val float64) { + cb(key, typ.Field(i).Name+"."+field, val) + }) + + } else if field_type.ConvertibleTo(f64Type) { + cb(key, typ.Field(i).Name, field.Convert(f64Type).Float()) + } + } + }) +} + +// if val is a pointer, deref until it isn't +func deref(val reflect.Value) reflect.Value { + for val.Type().Kind() == reflect.Ptr { + val = val.Elem() + } + return val +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/tags.go b/vendor/github.com/spacemonkeygo/monkit/v3/tags.go new file mode 100644 index 000000000..8f4f596a7 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/tags.go @@ -0,0 +1,157 @@ +// Copyright (C) 2015 Space Monkey, 
Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sort" + "strings" +) + +// SeriesTag is a key/value pair. When used with a measurement name, each set +// of unique key/value pairs represents a new unique series. +type SeriesTag struct { + Key, Val string +} + +// NewTag creates a new tag +func NewSeriesTag(key, val string) SeriesTag { + return SeriesTag{key, val} +} + +// TagSet is an immutible collection of tag, value pairs. +type TagSet struct { + all map[string]string + str string // cached string form +} + +// Get returns the value associated with the key. +func (t *TagSet) Get(key string) string { + if t == nil || t.all == nil { + return "" + } + return t.all[key] +} + +// All returns a map of all the key/value pairs in the tag set. It +// should not be modified. +func (t *TagSet) All() map[string]string { + if t == nil { + return nil + } + return t.all +} + +// Len returns the number of tags in the tag set. +func (t *TagSet) Len() int { + if t == nil { + return 0 + } + return len(t.all) +} + +// Set returns a new tag set with the key associated to the value. +func (t *TagSet) Set(key, value string) *TagSet { + return t.SetAll(map[string]string{key: value}) +} + +// SetAll returns a new tag set with the key value pairs in the map all set. 
+func (t *TagSet) SetAll(kvs map[string]string) *TagSet { + all := make(map[string]string) + if t != nil { + for key, value := range t.all { + all[key] = value + } + } + for key, value := range kvs { + all[key] = value + } + return &TagSet{all: all} +} + +// String returns a string form of the tag set suitable for sending to influxdb. +func (t *TagSet) String() string { + if t == nil { + return "" + } + if t.str == "" { + var builder strings.Builder + t.writeTags(&builder) + t.str = builder.String() + } + return t.str +} + +// writeTags writes the tags in the tag set to the builder. +func (t *TagSet) writeTags(builder *strings.Builder) { + type kv struct { + key string + value string + } + var kvs []kv + + for key, value := range t.all { + kvs = append(kvs, kv{key, value}) + } + sort.Slice(kvs, func(i, j int) bool { + return kvs[i].key < kvs[j].key + }) + + for i, kv := range kvs { + if i > 0 { + builder.WriteByte(',') + } + writeTag(builder, kv.key) + builder.WriteByte('=') + writeTag(builder, kv.value) + } +} + +// writeMeasurement writes a measurement to the builder. +func writeMeasurement(builder *strings.Builder, measurement string) { + if strings.IndexByte(measurement, ',') == -1 && + strings.IndexByte(measurement, ' ') == -1 { + + builder.WriteString(measurement) + return + } + + for i := 0; i < len(measurement); i++ { + if measurement[i] == ',' || + measurement[i] == ' ' { + builder.WriteByte('\\') + } + builder.WriteByte(measurement[i]) + } +} + +// writeTag writes a tag key, value, or field key to the builder. 
+func writeTag(builder *strings.Builder, tag string) { + if strings.IndexByte(tag, ',') == -1 && + strings.IndexByte(tag, '=') == -1 && + strings.IndexByte(tag, ' ') == -1 { + + builder.WriteString(tag) + return + } + + for i := 0; i < len(tag); i++ { + if tag[i] == ',' || + tag[i] == '=' || + tag[i] == ' ' { + builder.WriteByte('\\') + } + builder.WriteByte(tag[i]) + } +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/task.go b/vendor/github.com/spacemonkeygo/monkit/v3/task.go new file mode 100644 index 000000000..5e604a3af --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/task.go @@ -0,0 +1,74 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "time" +) + +type taskKey int + +const taskGetFunc taskKey = 0 + +type taskSecretT struct{} + +func (*taskSecretT) Value(key interface{}) interface{} { return nil } +func (*taskSecretT) Done() <-chan struct{} { return nil } +func (*taskSecretT) Err() error { return nil } +func (*taskSecretT) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +// Func returns the Func associated with the Task +func (f Task) Func() (out *Func) { + // we're doing crazy things to make a function have methods that do other + // things with internal state. 
basically, we have a secret argument we can + // pass to the function that is only checked if ctx is taskSecret ( + // which it should never be) that controls what other behavior we want. + // in this case, if arg[0] is taskGetFunc, then f will place the func in the + // out location. + // since someone can cast any function of this signature to a lazy task, + // let's make sure we got roughly expected behavior and panic otherwise + if f(&taskSecret, taskGetFunc, &out) != nil || out == nil { + panic("Func() called on a non-Task function") + } + return out +} + +func taskArgs(f *Func, args []interface{}) bool { + // this function essentially does method dispatch for Tasks. returns true + // if a method got dispatched and normal behavior should be aborted + if len(args) != 2 { + return false + } + val, ok := args[0].(taskKey) + if !ok { + return false + } + switch val { + case taskGetFunc: + *(args[1].(**Func)) = f + return true + } + return false +} + +// TaskNamed is like Task except you can choose the name of the associated +// Func. +// +// You may also include any SeriesTags which should be included with the Task. +func (s *Scope) TaskNamed(name string, tags ...SeriesTag) Task { + return s.FuncNamed(name, tags...).Task +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/timer.go b/vendor/github.com/spacemonkeygo/monkit/v3/timer.go new file mode 100644 index 000000000..69c2f4206 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/timer.go @@ -0,0 +1,96 @@ +// Copyright (C) 2016 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3/monotime" +) + +// Timer is a threadsafe convenience wrapper around a DurationDist. You should +// construct with NewTimer(), though the expected usage is from a Scope like +// so: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// ... +// timer := mon.Timer("event") +// // perform event +// timer.Stop() +// ... +// } +// +// Timers implement StatSource. +type Timer struct { + mtx sync.Mutex + times *DurationDist +} + +// NewTimer constructs a new Timer. +func NewTimer(key SeriesKey) *Timer { + return &Timer{times: NewDurationDist(key)} +} + +// Start constructs a RunningTimer +func (t *Timer) Start() *RunningTimer { + return &RunningTimer{ + start: monotime.Now(), + t: t} +} + +// RunningTimer should be constructed from a Timer. +type RunningTimer struct { + start time.Time + t *Timer + stopped bool +} + +// Elapsed just returns the amount of time since the timer started +func (r *RunningTimer) Elapsed() time.Duration { + return time.Since(r.start) +} + +// Stop stops the timer, adds the duration to the statistics information, and +// returns the elapsed time. 
+func (r *RunningTimer) Stop() time.Duration { + elapsed := r.Elapsed() + r.t.mtx.Lock() + if !r.stopped { + r.t.times.Insert(elapsed) + r.stopped = true + } + r.t.mtx.Unlock() + return elapsed +} + +// Values returns the main timer values +func (t *Timer) Values() *DurationDist { + t.mtx.Lock() + rv := t.times.Copy() + t.mtx.Unlock() + return rv +} + +// Stats implements the StatSource interface +func (t *Timer) Stats(cb func(key SeriesKey, field string, val float64)) { + t.mtx.Lock() + times := t.times.Copy() + t.mtx.Unlock() + + times.Stats(cb) +} diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/trace.go b/vendor/github.com/spacemonkeygo/monkit/v3/trace.go new file mode 100644 index 000000000..d20232455 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/trace.go @@ -0,0 +1,136 @@ +// Copyright (C) 2015 Space Monkey, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sync" + "sync/atomic" + "time" +) + +// SpanObserver is the interface plugins must implement if they want to observe +// all spans on a given trace as they happen. +type SpanObserver interface { + // Start is called when a Span starts + Start(s *Span) + + // Finish is called when a Span finishes, along with an error if any, whether + // or not it panicked, and what time it finished. + Finish(s *Span, err error, panicked bool, finish time.Time) +} + +// Trace represents a 'trace' of execution. 
A 'trace' is the collection of all +// of the 'spans' kicked off from the same root execution context. A trace is +// a concurrency-supporting analog of a stack trace, where a span is somewhat +// like a stack frame. +type Trace struct { + // sync/atomic things + spanCount int64 + spanObservers *spanObserverTuple + + // immutable things from construction + id int64 + + // protected by mtx + mtx sync.Mutex + vals map[interface{}]interface{} +} + +// NewTrace creates a new Trace. +func NewTrace(id int64) *Trace { + return &Trace{id: id} +} + +func (t *Trace) getObserver() SpanCtxObserver { + observers := loadSpanObserverTuple(&t.spanObservers) + if observers == nil { + return nil + } + if loadSpanObserverTuple(&observers.cdr) == nil { + return observers.car + } + return observers +} + +// ObserveSpans lets you register a SpanObserver for all future Spans on the +// Trace. The returned cancel method will remove your observer from the trace. +func (t *Trace) ObserveSpans(observer SpanObserver) (cancel func()) { + return t.ObserveSpansCtx(spanObserverToSpanCtxObserver{observer: observer}) +} + +// ObserveSpansCtx lets you register a SpanCtxObserver for all future Spans on the +// Trace. The returned cancel method will remove your observer from the trace. 
+func (t *Trace) ObserveSpansCtx(observer SpanCtxObserver) (cancel func()) { + for { + existing := loadSpanObserverTuple(&t.spanObservers) + ref := &spanObserverTuple{car: observer, cdr: existing} + if compareAndSwapSpanObserverTuple(&t.spanObservers, existing, ref) { + return func() { t.removeObserver(ref) } + } + } +} + +func (t *Trace) removeObserver(ref *spanObserverTuple) { + t.mtx.Lock() + defer t.mtx.Unlock() + for { + if removeObserverFrom(&t.spanObservers, ref) { + return + } + } +} + +func removeObserverFrom(parent **spanObserverTuple, ref *spanObserverTuple) ( + success bool) { + existing := loadSpanObserverTuple(parent) + if existing == nil { + return true + } + if existing != ref { + return removeObserverFrom(&existing.cdr, ref) + } + return compareAndSwapSpanObserverTuple(parent, existing, + loadSpanObserverTuple(&existing.cdr)) +} + +// Id returns the id of the Trace +func (t *Trace) Id() int64 { return t.id } + +// Get returns a value associated with a key on a trace. See Set. +func (t *Trace) Get(key interface{}) (val interface{}) { + t.mtx.Lock() + if t.vals != nil { + val = t.vals[key] + } + t.mtx.Unlock() + return val +} + +// Set sets a value associated with a key on a trace. See Get. +func (t *Trace) Set(key, val interface{}) { + t.mtx.Lock() + if t.vals == nil { + t.vals = map[interface{}]interface{}{key: val} + } else { + t.vals[key] = val + } + t.mtx.Unlock() +} + +func (t *Trace) incrementSpans() { atomic.AddInt64(&t.spanCount, 1) } +func (t *Trace) decrementSpans() { atomic.AddInt64(&t.spanCount, -1) } + +// Spans returns the number of spans currently associated with the Trace. +func (t *Trace) Spans() int64 { return atomic.LoadInt64(&t.spanCount) } diff --git a/vendor/github.com/spacemonkeygo/monkit/v3/val.go b/vendor/github.com/spacemonkeygo/monkit/v3/val.go new file mode 100644 index 000000000..b690ae5f1 --- /dev/null +++ b/vendor/github.com/spacemonkeygo/monkit/v3/val.go @@ -0,0 +1,204 @@ +// Copyright (C) 2015 Space Monkey, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package monkit + +import ( + "sync" + "sync/atomic" +) + +// IntVal is a convenience wrapper around an IntDist. Constructed using +// NewIntVal, though its expected usage is like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// ... +// mon.IntVal("size").Observe(val) +// ... +// } +// +type IntVal struct { + mtx sync.Mutex + dist IntDist +} + +// NewIntVal creates an IntVal +func NewIntVal(key SeriesKey) (v *IntVal) { + v = &IntVal{} + initIntDist(&v.dist, key) + return v +} + +// Observe observes an integer value +func (v *IntVal) Observe(val int64) { + v.mtx.Lock() + v.dist.Insert(val) + v.mtx.Unlock() +} + +// Stats implements the StatSource interface. +func (v *IntVal) Stats(cb func(key SeriesKey, field string, val float64)) { + v.mtx.Lock() + vd := v.dist.Copy() + v.mtx.Unlock() + + vd.Stats(cb) +} + +// Quantile returns an estimate of the requested quantile of observed values. +// 0 <= quantile <= 1 +func (v *IntVal) Quantile(quantile float64) (rv int64) { + v.mtx.Lock() + rv = v.dist.Query(quantile) + v.mtx.Unlock() + return rv +} + +// FloatVal is a convenience wrapper around an FloatDist. Constructed using +// NewFloatVal, though its expected usage is like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// ... +// mon.FloatVal("size").Observe(val) +// ... 
+// } +// +type FloatVal struct { + mtx sync.Mutex + dist FloatDist +} + +// NewFloatVal creates a FloatVal +func NewFloatVal(key SeriesKey) (v *FloatVal) { + v = &FloatVal{} + initFloatDist(&v.dist, key) + return v +} + +// Observe observes an floating point value +func (v *FloatVal) Observe(val float64) { + v.mtx.Lock() + v.dist.Insert(val) + v.mtx.Unlock() +} + +// Stats implements the StatSource interface. +func (v *FloatVal) Stats(cb func(key SeriesKey, field string, val float64)) { + v.mtx.Lock() + vd := v.dist.Copy() + v.mtx.Unlock() + + vd.Stats(cb) +} + +// Quantile returns an estimate of the requested quantile of observed values. +// 0 <= quantile <= 1 +func (v *FloatVal) Quantile(quantile float64) (rv float64) { + v.mtx.Lock() + rv = v.dist.Query(quantile) + v.mtx.Unlock() + return rv +} + +// BoolVal keeps statistics about boolean values. It keeps the number of trues, +// number of falses, and the disposition (number of trues minus number of +// falses). Constructed using NewBoolVal, though its expected usage is like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// ... +// mon.BoolVal("flipped").Observe(bool) +// ... +// } +// +type BoolVal struct { + trues int64 + falses int64 + recent int32 + key SeriesKey +} + +// NewBoolVal creates a BoolVal +func NewBoolVal(key SeriesKey) *BoolVal { + return &BoolVal{key: key} +} + +// Observe observes a boolean value +func (v *BoolVal) Observe(val bool) { + if val { + atomic.AddInt64(&v.trues, 1) + atomic.StoreInt32(&v.recent, 1) + } else { + atomic.AddInt64(&v.falses, 1) + atomic.StoreInt32(&v.recent, 0) + } +} + +// Stats implements the StatSource interface. 
+func (v *BoolVal) Stats(cb func(key SeriesKey, field string, val float64)) { + trues := atomic.LoadInt64(&v.trues) + falses := atomic.LoadInt64(&v.falses) + recent := atomic.LoadInt32(&v.recent) + cb(v.key, "disposition", float64(trues-falses)) + cb(v.key, "false", float64(falses)) + cb(v.key, "recent", float64(recent)) + cb(v.key, "true", float64(trues)) +} + +// StructVal keeps track of a structure of data. Constructed using +// NewStructVal, though its expected usage is like: +// +// var mon = monkit.Package() +// +// func MyFunc() { +// ... +// mon.StructVal("stats").Observe(stats) +// ... +// } +// +type StructVal struct { + mtx sync.Mutex + recent interface{} + key SeriesKey +} + +// NewStructVal creates a StructVal +func NewStructVal(key SeriesKey) *StructVal { + return &StructVal{key: key} +} + +// Observe observes a struct value. Only the fields convertable to float64 will +// be monitored. A reference to the most recently called Observe value is kept +// for reading when Stats is called. +func (v *StructVal) Observe(val interface{}) { + v.mtx.Lock() + v.recent = val + v.mtx.Unlock() +} + +// Stats implements the StatSource interface. +func (v *StructVal) Stats(cb func(key SeriesKey, field string, val float64)) { + v.mtx.Lock() + recent := v.recent + v.mtx.Unlock() + + if recent != nil { + StatSourceFromStruct(v.key, recent).Stats(cb) + } +} diff --git a/vendor/github.com/vivint/infectious/.travis.yml b/vendor/github.com/vivint/infectious/.travis.yml new file mode 100644 index 000000000..703e10943 --- /dev/null +++ b/vendor/github.com/vivint/infectious/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +script: + - go build ./... 
diff --git a/vendor/github.com/vivint/infectious/LICENSE b/vendor/github.com/vivint/infectious/LICENSE new file mode 100644 index 000000000..43cba6f32 --- /dev/null +++ b/vendor/github.com/vivint/infectious/LICENSE @@ -0,0 +1,67 @@ +///////////////// +// Copyright notices +///////////////// + +Copyright (C) 2016-2017 Vivint, Inc. +Copyright (c) 2015 Klaus Post +Copyright (c) 2015 Backblaze +Copyright (C) 2011 Billy Brumley (billy.brumley@aalto.fi) +Copyright (C) 2009-2010 Jack Lloyd (lloyd@randombit.net) +Copyright (C) 1996-1998 Luigi Rizzo (luigi@iet.unipi.it) + +Portions derived from code by Phil Karn (karn@ka9q.ampr.org), +Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari +Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 + +///////////////// +// Portions of this project (labeled in each file) are licensed under this +// license: +///////////////// + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +///////////////// +// All other portions of this project are licensed under this license: +///////////////// + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/vivint/infectious/README.md b/vendor/github.com/vivint/infectious/README.md new file mode 100644 index 000000000..e0486242c --- /dev/null +++ b/vendor/github.com/vivint/infectious/README.md @@ -0,0 +1,133 @@ +# infectious + +[![GoDoc](https://godoc.org/github.com/vivint/infectious?status.png)](https://godoc.org/github.com/vivint/infectious) + +Infectious implements +[Reed-Solomon forward error correction](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction). +It uses the +[Berlekamp-Welch error correction algorithm](https://en.wikipedia.org/wiki/Berlekamp%E2%80%93Welch_algorithm) +to achieve the ability to actually correct errors. + +[We wrote a blog post about how this library works!](https://innovation.vivint.com/introduction-to-reed-solomon-bc264d0794f8) + +### Example + +```golang +const ( + required = 8 + total = 14 +) + +// Create a *FEC, which will require required pieces for reconstruction at +// minimum, and generate total total pieces. +f, err := infectious.NewFEC(required, total) +if err != nil { + panic(err) +} + +// Prepare to receive the shares of encoded data. +shares := make([]infectious.Share, total) +output := func(s infectious.Share) { + // the memory in s gets reused, so we need to make a deep copy + shares[s.Number] = s.DeepCopy() +} + +// the data to encode must be padded to a multiple of required, hence the +// underscores. +err = f.Encode([]byte("hello, world! __"), output) +if err != nil { + panic(err) +} + +// we now have total shares. +for _, share := range shares { + fmt.Printf("%d: %#v\n", share.Number, string(share.Data)) +} + +// Let's reconstitute with two pieces missing and one piece corrupted. +shares = shares[2:] // drop the first two pieces +shares[2].Data[1] = '!' // mutate some data + +result, err := f.Decode(nil, shares) +if err != nil { + panic(err) +} + +// we have the original data! 
+fmt.Printf("got: %#v\n", string(result)) +``` + +**Caution:** this package API leans toward providing the user more power and +performance at the expense of having some really sharp edges! Read the +documentation about memory lifecycles carefully! + +Please see the docs at http://godoc.org/github.com/vivint/infectious + +### Thanks + +We're forever indebted to the giants on whose shoulders we stand. The LICENSE +has our full copyright history, but an extra special thanks to Klaus Post for +much of the initial Go code. See his post for more: +http://blog.klauspost.com/blazingly-fast-reed-solomon-coding/ + +### LICENSE + + * Copyright (C) 2016-2017 Vivint, Inc. + * Copyright (c) 2015 Klaus Post + * Copyright (c) 2015 Backblaze + * Copyright (C) 2011 Billy Brumley (billy.brumley@aalto.fi) + * Copyright (C) 2009-2010 Jack Lloyd (lloyd@randombit.net) + * Copyright (C) 1996-1998 Luigi Rizzo (luigi@iet.unipi.it) + +Portions derived from code by Phil Karn (karn@ka9q.ampr.org), +Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari +Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 + +**Portions of this project (labeled in each file) are licensed under this +license:** + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +**All other portions of this project are licensed under this license:** + +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/vivint/infectious/addmul_amd64.go b/vendor/github.com/vivint/infectious/addmul_amd64.go new file mode 100644 index 000000000..09956112a --- /dev/null +++ b/vendor/github.com/vivint/infectious/addmul_amd64.go @@ -0,0 +1,58 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. 
+// Copyright (c) 2015 Klaus Post +// Copyright (c) 2015 Backblaze +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package infectious + +//go:noescape +func addmulSSSE3(lowhigh *pair, in, out *byte, n int, mul *byte) + +//go:noescape +func addmulAVX2(lowhigh *pair, in, out *byte, n int) + +func addmul(z, x []byte, y byte) { + if len(z) == 0 { + return + } + + var done int + if hasAVX2 { + addmulAVX2(&mul_table_pair[y], &x[0], &z[0], len(z)) + done = (len(x) >> 5) << 5 + } else if hasSSSE3 { + addmulSSSE3(&mul_table_pair[y], &x[0], &z[0], len(z), &gf_mul_table[y][0]) + //done = (len(x) >> 4) << 4 + return + } + + if done < len(z) { + // hints to the compiler to remove bounds checks + z = z[done:] + x = x[done : done+len(z)] + + gf_mul_y := gf_mul_table[y][:] + for i := range z { + z[i] ^= gf_mul_y[x[i]] + } + } +} diff --git a/vendor/github.com/vivint/infectious/addmul_amd64.s b/vendor/github.com/vivint/infectious/addmul_amd64.s new file mode 100644 index 000000000..9d5e46b9f --- /dev/null +++ b/vendor/github.com/vivint/infectious/addmul_amd64.s @@ -0,0 +1,199 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. +// Copyright (c) 2015 Klaus Post +// Copyright (c) 2015 Backblaze +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +/* +The corresponding C implementations: + +void addmul( + uint8_t * restrict lowhigh, + uint8_t * restrict in, + uint8_t * restrict out, + int n +) { + for(int i = 0; i < n; i++){ + int value = in[i]; + int low = value & 15; + int high = value >> 4; + out[i] = out[i] ^ lowhigh[low] ^ lowhigh[high+16]; + } +} + +void addmulSSSE3( + uint8_t * restrict lowhigh, + uint8_t * restrict in, + uint8_t * restrict out, + int n +) { + int i = 0; + + __m128i lotbl = _mm_loadu_si128((__m128i*)(&lowhigh[0])); + __m128i hitbl = _mm_loadu_si128((__m128i*)(&lowhigh[16])); + + __m128i lomask = _mm_set1_epi8(0xF); + + #pragma nounroll + for(i = 0; i < (n/16)*16; i += 16){ + __m128i input8 = _mm_loadu_si128((__m128i*)(&in[i])); + __m128i output8 = _mm_loadu_si128((__m128i*)(&out[i])); + + __m128i lo8 = _mm_and_si128(lomask, input8); + __m128i hi8 = _mm_and_si128(lomask, _mm_srli_si128(input8, 4)); // simulate shrli epi8 + + output8 = _mm_xor_si128(output8, _mm_shuffle_epi8(lotbl, lo8)); + output8 = _mm_xor_si128(output8, _mm_shuffle_epi8(hitbl, hi8)); + + _mm_storeu_si128((__m128i*)(&out[i]), output8); + } +} +*/ + +#include "textflag.h" +DATA nybble_mask<>+0x00(SB)/8, $0x0F0F0F0F0F0F0F0F +DATA nybble_mask<>+0x08(SB)/8, $0x0F0F0F0F0F0F0F0F +DATA nybble_mask<>+0x10(SB)/8, $0x0F0F0F0F0F0F0F0F +DATA nybble_mask<>+0x18(SB)/8, $0x0F0F0F0F0F0F0F0F +GLOBL nybble_mask<>(SB), (NOPTR+RODATA), $32 + +#define LOWHIGH DI +#define LOW X8 +#define HIGH X9 +#define IN SI +#define OUT DX +#define INDEX AX + +#define LEN CX +#define LEN16 R8 // LEN16 = (LEN / 16) * 16 + +#define LOMASK X7 // LOMASK = repeated 15 +// X0-X5 temps + +// func addmulSSSE3(lowhigh *[2][16]byte, in, out *byte, len int) +TEXT ·addmulSSSE3(SB), 7, $0 + 
MOVQ _in+8(FP), IN + MOVQ _out+16(FP), OUT + MOVQ _len+24(FP), LEN + + MOVQ LEN, LEN16 + ANDQ $-16, LEN16 + + JLE start_slow // if LEN16 == 0 { goto done } + + MOVQ _lohi+0(FP), LOWHIGH + MOVOU (LOWHIGH), LOW + MOVOU 16(LOWHIGH), HIGH + + MOVOU nybble_mask<>(SB), LOMASK + XORQ INDEX, INDEX // INDEX = 0 + +loop16: + MOVOU (IN)(INDEX*1), X0 // X0 = INPUT[INDEX] + MOVOU LOW, X4 // X4 = copy(LOW) + MOVOU (OUT)(INDEX*1), X2 // X2 = OUT[INDEX] + MOVOU X0, X1 // X0 = input[index] & 15 + MOVOU HIGH, X5 // X5 = copy(HIGH) + + PAND LOMASK, X0 + PSRLQ $4, X1 // X1 = input[index] + PSHUFB X0, X4 // X4 = LOW[X0] + + PAND LOMASK, X1 // X1 = input[index] >> 4 + PSHUFB X1, X5 // X5 = HIGH[X1] + PXOR X4, X2 // X2 = OUT[INDEX] ^ X4 ^ X5 + PXOR X5, X2 + + MOVOU X2, 0(OUT)(INDEX*1) + + ADDQ $16, INDEX + CMPQ LEN16, INDEX // INDEX < LEN16 + JG loop16 + +start_slow: + MOVQ _len+32(FP), LOWHIGH + MOVQ LEN16, INDEX + CMPQ LEN, INDEX + JLE done + +loop1: + MOVBQZX (IN)(INDEX*1), R9 // R9 := in[index] + MOVBQZX (LOWHIGH)(R9*1), R10 // R10 := multiply[R9] + XORB R10B, (OUT)(INDEX*1) // out[index] ^= R10 + INCQ INDEX + CMPQ LEN, INDEX + JG loop1 + +done: + RET + +#undef LOWHIGH +#undef LOW +#undef HIGH +#undef IN +#undef OUT +#undef LEN +#undef INDEX +#undef LEN16 +#undef LOMASK + +// func addmulAVX2(lowhigh *[2][16]byte, in, out *byte, len int) +TEXT ·addmulAVX2(SB), 7, $0 + MOVQ low+0(FP), SI // SI: &lowhigh + MOVOU (SI), X6 // X6: low + MOVOU 16(SI), X7 // X7: high + + MOVQ $15, BX // BX: low mask + MOVQ BX, X5 + + MOVQ len+24(FP), R9 // R9: len(in), len(out) + + LONG $0x384de3c4; WORD $0x01f6 // VINSERTI128 YMM6, YMM6, XMM6, 1 ; low + LONG $0x3845e3c4; WORD $0x01ff // VINSERTI128 YMM7, YMM7, XMM7, 1 ; high + LONG $0x787d62c4; BYTE $0xc5 // VPBROADCASTB YMM8, XMM5 ; X8: lomask (unpacked) + + SHRQ $5, R9 // len(in) / 32 + MOVQ out+16(FP), DX // DX: &out + MOVQ in+8(FP), SI // R11: &in + TESTQ R9, R9 + JZ done_xor_avx2 + +loopback_xor_avx2: + LONG $0x066ffec5 // VMOVDQU YMM0, [rsi] + LONG 
$0x226ffec5 // VMOVDQU YMM4, [rdx] + LONG $0xd073f5c5; BYTE $0x04 // VPSRLQ YMM1, YMM0, 4 ; X1: high input + LONG $0xdb7dc1c4; BYTE $0xc0 // VPAND YMM0, YMM0, YMM8 ; X0: low input + LONG $0xdb75c1c4; BYTE $0xc8 // VPAND YMM1, YMM1, YMM8 ; X1: high input + LONG $0x004de2c4; BYTE $0xd0 // VPSHUFB YMM2, YMM6, YMM0 ; X2: mul low part + LONG $0x0045e2c4; BYTE $0xd9 // VPSHUFB YMM3, YMM7, YMM1 ; X2: mul high part + LONG $0xdbefedc5 // VPXOR YMM3, YMM2, YMM3 ; X3: Result + LONG $0xe4efe5c5 // VPXOR YMM4, YMM3, YMM4 ; X4: Result + LONG $0x227ffec5 // VMOVDQU [rdx], YMM4 + + ADDQ $32, SI // in+=32 + ADDQ $32, DX // out+=32 + SUBQ $1, R9 + JNZ loopback_xor_avx2 + +done_xor_avx2: + // VZEROUPPER + BYTE $0xc5; BYTE $0xf8; BYTE $0x77 + RET diff --git a/vendor/github.com/vivint/infectious/addmul_noasm.go b/vendor/github.com/vivint/infectious/addmul_noasm.go new file mode 100644 index 000000000..c5bec2878 --- /dev/null +++ b/vendor/github.com/vivint/infectious/addmul_noasm.go @@ -0,0 +1,40 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// +build !amd64 + +package infectious + +func addmul(z []byte, x []byte, y byte) { + if y == 0 { + return + } + + // hint to the compiler that we don't need bounds checks on x + x = x[:len(z)] + + // TODO(jeff): loop unrolling for SPEEDS + gf_mul_y := gf_mul_table[y][:] + for i := range z { + z[i] ^= gf_mul_y[x[i]] + } +} diff --git a/vendor/github.com/vivint/infectious/addmul_tables_amd64.go b/vendor/github.com/vivint/infectious/addmul_tables_amd64.go new file mode 100644 index 000000000..6d593a99f --- /dev/null +++ b/vendor/github.com/vivint/infectious/addmul_tables_amd64.go @@ -0,0 +1,288 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. +// Copyright (c) 2015 Klaus Post +// Copyright (c) 2015 Backblaze +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package infectious + +type pair struct { + low, high [16]byte +} + +var mul_table_pair = [256]pair{ + {[16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}, + {[16]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}, [16]byte{0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0}}, + {[16]byte{0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e}, [16]byte{0x00, 0x20, 0x40, 0x60, 0x80, 0xa0, 0xc0, 0xe0, 0x1d, 0x3d, 0x5d, 0x7d, 0x9d, 0xbd, 0xdd, 0xfd}}, + {[16]byte{0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11}, [16]byte{0x00, 0x30, 0x60, 0x50, 0xc0, 0xf0, 0xa0, 0x90, 0x9d, 0xad, 0xfd, 0xcd, 0x5d, 0x6d, 0x3d, 0x0d}}, + {[16]byte{0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x28, 0x2c, 0x30, 0x34, 0x38, 0x3c}, [16]byte{0x00, 0x40, 0x80, 0xc0, 0x1d, 0x5d, 0x9d, 0xdd, 0x3a, 0x7a, 0xba, 0xfa, 0x27, 0x67, 0xa7, 0xe7}}, + {[16]byte{0x00, 0x05, 0x0a, 0x0f, 0x14, 0x11, 0x1e, 0x1b, 0x28, 0x2d, 0x22, 0x27, 0x3c, 0x39, 0x36, 0x33}, [16]byte{0x00, 0x50, 0xa0, 0xf0, 0x5d, 0x0d, 0xfd, 0xad, 0xba, 0xea, 0x1a, 0x4a, 0xe7, 0xb7, 0x47, 0x17}}, + {[16]byte{0x00, 0x06, 0x0c, 0x0a, 0x18, 0x1e, 0x14, 0x12, 0x30, 0x36, 0x3c, 0x3a, 0x28, 0x2e, 0x24, 0x22}, [16]byte{0x00, 0x60, 0xc0, 0xa0, 0x9d, 0xfd, 0x5d, 0x3d, 0x27, 0x47, 0xe7, 0x87, 0xba, 0xda, 0x7a, 0x1a}}, + {[16]byte{0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d}, 
[16]byte{0x00, 0x70, 0xe0, 0x90, 0xdd, 0xad, 0x3d, 0x4d, 0xa7, 0xd7, 0x47, 0x37, 0x7a, 0x0a, 0x9a, 0xea}}, + {[16]byte{0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38, 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78}, [16]byte{0x00, 0x80, 0x1d, 0x9d, 0x3a, 0xba, 0x27, 0xa7, 0x74, 0xf4, 0x69, 0xe9, 0x4e, 0xce, 0x53, 0xd3}}, + {[16]byte{0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77}, [16]byte{0x00, 0x90, 0x3d, 0xad, 0x7a, 0xea, 0x47, 0xd7, 0xf4, 0x64, 0xc9, 0x59, 0x8e, 0x1e, 0xb3, 0x23}}, + {[16]byte{0x00, 0x0a, 0x14, 0x1e, 0x28, 0x22, 0x3c, 0x36, 0x50, 0x5a, 0x44, 0x4e, 0x78, 0x72, 0x6c, 0x66}, [16]byte{0x00, 0xa0, 0x5d, 0xfd, 0xba, 0x1a, 0xe7, 0x47, 0x69, 0xc9, 0x34, 0x94, 0xd3, 0x73, 0x8e, 0x2e}}, + {[16]byte{0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69}, [16]byte{0x00, 0xb0, 0x7d, 0xcd, 0xfa, 0x4a, 0x87, 0x37, 0xe9, 0x59, 0x94, 0x24, 0x13, 0xa3, 0x6e, 0xde}}, + {[16]byte{0x00, 0x0c, 0x18, 0x14, 0x30, 0x3c, 0x28, 0x24, 0x60, 0x6c, 0x78, 0x74, 0x50, 0x5c, 0x48, 0x44}, [16]byte{0x00, 0xc0, 0x9d, 0x5d, 0x27, 0xe7, 0xba, 0x7a, 0x4e, 0x8e, 0xd3, 0x13, 0x69, 0xa9, 0xf4, 0x34}}, + {[16]byte{0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b}, [16]byte{0x00, 0xd0, 0xbd, 0x6d, 0x67, 0xb7, 0xda, 0x0a, 0xce, 0x1e, 0x73, 0xa3, 0xa9, 0x79, 0x14, 0xc4}}, + {[16]byte{0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a}, [16]byte{0x00, 0xe0, 0xdd, 0x3d, 0xa7, 0x47, 0x7a, 0x9a, 0x53, 0xb3, 0x8e, 0x6e, 0xf4, 0x14, 0x29, 0xc9}}, + {[16]byte{0x00, 0x0f, 0x1e, 0x11, 0x3c, 0x33, 0x22, 0x2d, 0x78, 0x77, 0x66, 0x69, 0x44, 0x4b, 0x5a, 0x55}, [16]byte{0x00, 0xf0, 0xfd, 0x0d, 0xe7, 0x17, 0x1a, 0xea, 0xd3, 0x23, 0x2e, 0xde, 0x34, 0xc4, 0xc9, 0x39}}, + {[16]byte{0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, 0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0}, [16]byte{0x00, 0x1d, 0x3a, 0x27, 0x74, 0x69, 0x4e, 
0x53, 0xe8, 0xf5, 0xd2, 0xcf, 0x9c, 0x81, 0xa6, 0xbb}}, + {[16]byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}, [16]byte{0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b}}, + {[16]byte{0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e, 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee}, [16]byte{0x00, 0x3d, 0x7a, 0x47, 0xf4, 0xc9, 0x8e, 0xb3, 0xf5, 0xc8, 0x8f, 0xb2, 0x01, 0x3c, 0x7b, 0x46}}, + {[16]byte{0x00, 0x13, 0x26, 0x35, 0x4c, 0x5f, 0x6a, 0x79, 0x98, 0x8b, 0xbe, 0xad, 0xd4, 0xc7, 0xf2, 0xe1}, [16]byte{0x00, 0x2d, 0x5a, 0x77, 0xb4, 0x99, 0xee, 0xc3, 0x75, 0x58, 0x2f, 0x02, 0xc1, 0xec, 0x9b, 0xb6}}, + {[16]byte{0x00, 0x14, 0x28, 0x3c, 0x50, 0x44, 0x78, 0x6c, 0xa0, 0xb4, 0x88, 0x9c, 0xf0, 0xe4, 0xd8, 0xcc}, [16]byte{0x00, 0x5d, 0xba, 0xe7, 0x69, 0x34, 0xd3, 0x8e, 0xd2, 0x8f, 0x68, 0x35, 0xbb, 0xe6, 0x01, 0x5c}}, + {[16]byte{0x00, 0x15, 0x2a, 0x3f, 0x54, 0x41, 0x7e, 0x6b, 0xa8, 0xbd, 0x82, 0x97, 0xfc, 0xe9, 0xd6, 0xc3}, [16]byte{0x00, 0x4d, 0x9a, 0xd7, 0x29, 0x64, 0xb3, 0xfe, 0x52, 0x1f, 0xc8, 0x85, 0x7b, 0x36, 0xe1, 0xac}}, + {[16]byte{0x00, 0x16, 0x2c, 0x3a, 0x58, 0x4e, 0x74, 0x62, 0xb0, 0xa6, 0x9c, 0x8a, 0xe8, 0xfe, 0xc4, 0xd2}, [16]byte{0x00, 0x7d, 0xfa, 0x87, 0xe9, 0x94, 0x13, 0x6e, 0xcf, 0xb2, 0x35, 0x48, 0x26, 0x5b, 0xdc, 0xa1}}, + {[16]byte{0x00, 0x17, 0x2e, 0x39, 0x5c, 0x4b, 0x72, 0x65, 0xb8, 0xaf, 0x96, 0x81, 0xe4, 0xf3, 0xca, 0xdd}, [16]byte{0x00, 0x6d, 0xda, 0xb7, 0xa9, 0xc4, 0x73, 0x1e, 0x4f, 0x22, 0x95, 0xf8, 0xe6, 0x8b, 0x3c, 0x51}}, + {[16]byte{0x00, 0x18, 0x30, 0x28, 0x60, 0x78, 0x50, 0x48, 0xc0, 0xd8, 0xf0, 0xe8, 0xa0, 0xb8, 0x90, 0x88}, [16]byte{0x00, 0x9d, 0x27, 0xba, 0x4e, 0xd3, 0x69, 0xf4, 0x9c, 0x01, 0xbb, 0x26, 0xd2, 0x4f, 0xf5, 0x68}}, + {[16]byte{0x00, 0x19, 0x32, 0x2b, 0x64, 0x7d, 0x56, 0x4f, 0xc8, 0xd1, 0xfa, 0xe3, 0xac, 0xb5, 0x9e, 0x87}, [16]byte{0x00, 0x8d, 0x07, 0x8a, 0x0e, 0x83, 0x09, 0x84, 0x1c, 0x91, 0x1b, 0x96, 0x12, 0x9f, 0x15, 0x98}}, 
+ {[16]byte{0x00, 0x1a, 0x34, 0x2e, 0x68, 0x72, 0x5c, 0x46, 0xd0, 0xca, 0xe4, 0xfe, 0xb8, 0xa2, 0x8c, 0x96}, [16]byte{0x00, 0xbd, 0x67, 0xda, 0xce, 0x73, 0xa9, 0x14, 0x81, 0x3c, 0xe6, 0x5b, 0x4f, 0xf2, 0x28, 0x95}}, + {[16]byte{0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41, 0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99}, [16]byte{0x00, 0xad, 0x47, 0xea, 0x8e, 0x23, 0xc9, 0x64, 0x01, 0xac, 0x46, 0xeb, 0x8f, 0x22, 0xc8, 0x65}}, + {[16]byte{0x00, 0x1c, 0x38, 0x24, 0x70, 0x6c, 0x48, 0x54, 0xe0, 0xfc, 0xd8, 0xc4, 0x90, 0x8c, 0xa8, 0xb4}, [16]byte{0x00, 0xdd, 0xa7, 0x7a, 0x53, 0x8e, 0xf4, 0x29, 0xa6, 0x7b, 0x01, 0xdc, 0xf5, 0x28, 0x52, 0x8f}}, + {[16]byte{0x00, 0x1d, 0x3a, 0x27, 0x74, 0x69, 0x4e, 0x53, 0xe8, 0xf5, 0xd2, 0xcf, 0x9c, 0x81, 0xa6, 0xbb}, [16]byte{0x00, 0xcd, 0x87, 0x4a, 0x13, 0xde, 0x94, 0x59, 0x26, 0xeb, 0xa1, 0x6c, 0x35, 0xf8, 0xb2, 0x7f}}, + {[16]byte{0x00, 0x1e, 0x3c, 0x22, 0x78, 0x66, 0x44, 0x5a, 0xf0, 0xee, 0xcc, 0xd2, 0x88, 0x96, 0xb4, 0xaa}, [16]byte{0x00, 0xfd, 0xe7, 0x1a, 0xd3, 0x2e, 0x34, 0xc9, 0xbb, 0x46, 0x5c, 0xa1, 0x68, 0x95, 0x8f, 0x72}}, + {[16]byte{0x00, 0x1f, 0x3e, 0x21, 0x7c, 0x63, 0x42, 0x5d, 0xf8, 0xe7, 0xc6, 0xd9, 0x84, 0x9b, 0xba, 0xa5}, [16]byte{0x00, 0xed, 0xc7, 0x2a, 0x93, 0x7e, 0x54, 0xb9, 0x3b, 0xd6, 0xfc, 0x11, 0xa8, 0x45, 0x6f, 0x82}}, + {[16]byte{0x00, 0x20, 0x40, 0x60, 0x80, 0xa0, 0xc0, 0xe0, 0x1d, 0x3d, 0x5d, 0x7d, 0x9d, 0xbd, 0xdd, 0xfd}, [16]byte{0x00, 0x3a, 0x74, 0x4e, 0xe8, 0xd2, 0x9c, 0xa6, 0xcd, 0xf7, 0xb9, 0x83, 0x25, 0x1f, 0x51, 0x6b}}, + {[16]byte{0x00, 0x21, 0x42, 0x63, 0x84, 0xa5, 0xc6, 0xe7, 0x15, 0x34, 0x57, 0x76, 0x91, 0xb0, 0xd3, 0xf2}, [16]byte{0x00, 0x2a, 0x54, 0x7e, 0xa8, 0x82, 0xfc, 0xd6, 0x4d, 0x67, 0x19, 0x33, 0xe5, 0xcf, 0xb1, 0x9b}}, + {[16]byte{0x00, 0x22, 0x44, 0x66, 0x88, 0xaa, 0xcc, 0xee, 0x0d, 0x2f, 0x49, 0x6b, 0x85, 0xa7, 0xc1, 0xe3}, [16]byte{0x00, 0x1a, 0x34, 0x2e, 0x68, 0x72, 0x5c, 0x46, 0xd0, 0xca, 0xe4, 0xfe, 0xb8, 0xa2, 0x8c, 0x96}}, + {[16]byte{0x00, 0x23, 0x46, 0x65, 0x8c, 0xaf, 0xca, 
0xe9, 0x05, 0x26, 0x43, 0x60, 0x89, 0xaa, 0xcf, 0xec}, [16]byte{0x00, 0x0a, 0x14, 0x1e, 0x28, 0x22, 0x3c, 0x36, 0x50, 0x5a, 0x44, 0x4e, 0x78, 0x72, 0x6c, 0x66}}, + {[16]byte{0x00, 0x24, 0x48, 0x6c, 0x90, 0xb4, 0xd8, 0xfc, 0x3d, 0x19, 0x75, 0x51, 0xad, 0x89, 0xe5, 0xc1}, [16]byte{0x00, 0x7a, 0xf4, 0x8e, 0xf5, 0x8f, 0x01, 0x7b, 0xf7, 0x8d, 0x03, 0x79, 0x02, 0x78, 0xf6, 0x8c}}, + {[16]byte{0x00, 0x25, 0x4a, 0x6f, 0x94, 0xb1, 0xde, 0xfb, 0x35, 0x10, 0x7f, 0x5a, 0xa1, 0x84, 0xeb, 0xce}, [16]byte{0x00, 0x6a, 0xd4, 0xbe, 0xb5, 0xdf, 0x61, 0x0b, 0x77, 0x1d, 0xa3, 0xc9, 0xc2, 0xa8, 0x16, 0x7c}}, + {[16]byte{0x00, 0x26, 0x4c, 0x6a, 0x98, 0xbe, 0xd4, 0xf2, 0x2d, 0x0b, 0x61, 0x47, 0xb5, 0x93, 0xf9, 0xdf}, [16]byte{0x00, 0x5a, 0xb4, 0xee, 0x75, 0x2f, 0xc1, 0x9b, 0xea, 0xb0, 0x5e, 0x04, 0x9f, 0xc5, 0x2b, 0x71}}, + {[16]byte{0x00, 0x27, 0x4e, 0x69, 0x9c, 0xbb, 0xd2, 0xf5, 0x25, 0x02, 0x6b, 0x4c, 0xb9, 0x9e, 0xf7, 0xd0}, [16]byte{0x00, 0x4a, 0x94, 0xde, 0x35, 0x7f, 0xa1, 0xeb, 0x6a, 0x20, 0xfe, 0xb4, 0x5f, 0x15, 0xcb, 0x81}}, + {[16]byte{0x00, 0x28, 0x50, 0x78, 0xa0, 0x88, 0xf0, 0xd8, 0x5d, 0x75, 0x0d, 0x25, 0xfd, 0xd5, 0xad, 0x85}, [16]byte{0x00, 0xba, 0x69, 0xd3, 0xd2, 0x68, 0xbb, 0x01, 0xb9, 0x03, 0xd0, 0x6a, 0x6b, 0xd1, 0x02, 0xb8}}, + {[16]byte{0x00, 0x29, 0x52, 0x7b, 0xa4, 0x8d, 0xf6, 0xdf, 0x55, 0x7c, 0x07, 0x2e, 0xf1, 0xd8, 0xa3, 0x8a}, [16]byte{0x00, 0xaa, 0x49, 0xe3, 0x92, 0x38, 0xdb, 0x71, 0x39, 0x93, 0x70, 0xda, 0xab, 0x01, 0xe2, 0x48}}, + {[16]byte{0x00, 0x2a, 0x54, 0x7e, 0xa8, 0x82, 0xfc, 0xd6, 0x4d, 0x67, 0x19, 0x33, 0xe5, 0xcf, 0xb1, 0x9b}, [16]byte{0x00, 0x9a, 0x29, 0xb3, 0x52, 0xc8, 0x7b, 0xe1, 0xa4, 0x3e, 0x8d, 0x17, 0xf6, 0x6c, 0xdf, 0x45}}, + {[16]byte{0x00, 0x2b, 0x56, 0x7d, 0xac, 0x87, 0xfa, 0xd1, 0x45, 0x6e, 0x13, 0x38, 0xe9, 0xc2, 0xbf, 0x94}, [16]byte{0x00, 0x8a, 0x09, 0x83, 0x12, 0x98, 0x1b, 0x91, 0x24, 0xae, 0x2d, 0xa7, 0x36, 0xbc, 0x3f, 0xb5}}, + {[16]byte{0x00, 0x2c, 0x58, 0x74, 0xb0, 0x9c, 0xe8, 0xc4, 0x7d, 0x51, 0x25, 0x09, 0xcd, 0xe1, 0x95, 0xb9}, 
[16]byte{0x00, 0xfa, 0xe9, 0x13, 0xcf, 0x35, 0x26, 0xdc, 0x83, 0x79, 0x6a, 0x90, 0x4c, 0xb6, 0xa5, 0x5f}}, + {[16]byte{0x00, 0x2d, 0x5a, 0x77, 0xb4, 0x99, 0xee, 0xc3, 0x75, 0x58, 0x2f, 0x02, 0xc1, 0xec, 0x9b, 0xb6}, [16]byte{0x00, 0xea, 0xc9, 0x23, 0x8f, 0x65, 0x46, 0xac, 0x03, 0xe9, 0xca, 0x20, 0x8c, 0x66, 0x45, 0xaf}}, + {[16]byte{0x00, 0x2e, 0x5c, 0x72, 0xb8, 0x96, 0xe4, 0xca, 0x6d, 0x43, 0x31, 0x1f, 0xd5, 0xfb, 0x89, 0xa7}, [16]byte{0x00, 0xda, 0xa9, 0x73, 0x4f, 0x95, 0xe6, 0x3c, 0x9e, 0x44, 0x37, 0xed, 0xd1, 0x0b, 0x78, 0xa2}}, + {[16]byte{0x00, 0x2f, 0x5e, 0x71, 0xbc, 0x93, 0xe2, 0xcd, 0x65, 0x4a, 0x3b, 0x14, 0xd9, 0xf6, 0x87, 0xa8}, [16]byte{0x00, 0xca, 0x89, 0x43, 0x0f, 0xc5, 0x86, 0x4c, 0x1e, 0xd4, 0x97, 0x5d, 0x11, 0xdb, 0x98, 0x52}}, + {[16]byte{0x00, 0x30, 0x60, 0x50, 0xc0, 0xf0, 0xa0, 0x90, 0x9d, 0xad, 0xfd, 0xcd, 0x5d, 0x6d, 0x3d, 0x0d}, [16]byte{0x00, 0x27, 0x4e, 0x69, 0x9c, 0xbb, 0xd2, 0xf5, 0x25, 0x02, 0x6b, 0x4c, 0xb9, 0x9e, 0xf7, 0xd0}}, + {[16]byte{0x00, 0x31, 0x62, 0x53, 0xc4, 0xf5, 0xa6, 0x97, 0x95, 0xa4, 0xf7, 0xc6, 0x51, 0x60, 0x33, 0x02}, [16]byte{0x00, 0x37, 0x6e, 0x59, 0xdc, 0xeb, 0xb2, 0x85, 0xa5, 0x92, 0xcb, 0xfc, 0x79, 0x4e, 0x17, 0x20}}, + {[16]byte{0x00, 0x32, 0x64, 0x56, 0xc8, 0xfa, 0xac, 0x9e, 0x8d, 0xbf, 0xe9, 0xdb, 0x45, 0x77, 0x21, 0x13}, [16]byte{0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d}}, + {[16]byte{0x00, 0x33, 0x66, 0x55, 0xcc, 0xff, 0xaa, 0x99, 0x85, 0xb6, 0xe3, 0xd0, 0x49, 0x7a, 0x2f, 0x1c}, [16]byte{0x00, 0x17, 0x2e, 0x39, 0x5c, 0x4b, 0x72, 0x65, 0xb8, 0xaf, 0x96, 0x81, 0xe4, 0xf3, 0xca, 0xdd}}, + {[16]byte{0x00, 0x34, 0x68, 0x5c, 0xd0, 0xe4, 0xb8, 0x8c, 0xbd, 0x89, 0xd5, 0xe1, 0x6d, 0x59, 0x05, 0x31}, [16]byte{0x00, 0x67, 0xce, 0xa9, 0x81, 0xe6, 0x4f, 0x28, 0x1f, 0x78, 0xd1, 0xb6, 0x9e, 0xf9, 0x50, 0x37}}, + {[16]byte{0x00, 0x35, 0x6a, 0x5f, 0xd4, 0xe1, 0xbe, 0x8b, 0xb5, 0x80, 0xdf, 0xea, 0x61, 0x54, 0x0b, 0x3e}, [16]byte{0x00, 0x77, 0xee, 0x99, 0xc1, 0xb6, 0x2f, 
0x58, 0x9f, 0xe8, 0x71, 0x06, 0x5e, 0x29, 0xb0, 0xc7}}, + {[16]byte{0x00, 0x36, 0x6c, 0x5a, 0xd8, 0xee, 0xb4, 0x82, 0xad, 0x9b, 0xc1, 0xf7, 0x75, 0x43, 0x19, 0x2f}, [16]byte{0x00, 0x47, 0x8e, 0xc9, 0x01, 0x46, 0x8f, 0xc8, 0x02, 0x45, 0x8c, 0xcb, 0x03, 0x44, 0x8d, 0xca}}, + {[16]byte{0x00, 0x37, 0x6e, 0x59, 0xdc, 0xeb, 0xb2, 0x85, 0xa5, 0x92, 0xcb, 0xfc, 0x79, 0x4e, 0x17, 0x20}, [16]byte{0x00, 0x57, 0xae, 0xf9, 0x41, 0x16, 0xef, 0xb8, 0x82, 0xd5, 0x2c, 0x7b, 0xc3, 0x94, 0x6d, 0x3a}}, + {[16]byte{0x00, 0x38, 0x70, 0x48, 0xe0, 0xd8, 0x90, 0xa8, 0xdd, 0xe5, 0xad, 0x95, 0x3d, 0x05, 0x4d, 0x75}, [16]byte{0x00, 0xa7, 0x53, 0xf4, 0xa6, 0x01, 0xf5, 0x52, 0x51, 0xf6, 0x02, 0xa5, 0xf7, 0x50, 0xa4, 0x03}}, + {[16]byte{0x00, 0x39, 0x72, 0x4b, 0xe4, 0xdd, 0x96, 0xaf, 0xd5, 0xec, 0xa7, 0x9e, 0x31, 0x08, 0x43, 0x7a}, [16]byte{0x00, 0xb7, 0x73, 0xc4, 0xe6, 0x51, 0x95, 0x22, 0xd1, 0x66, 0xa2, 0x15, 0x37, 0x80, 0x44, 0xf3}}, + {[16]byte{0x00, 0x3a, 0x74, 0x4e, 0xe8, 0xd2, 0x9c, 0xa6, 0xcd, 0xf7, 0xb9, 0x83, 0x25, 0x1f, 0x51, 0x6b}, [16]byte{0x00, 0x87, 0x13, 0x94, 0x26, 0xa1, 0x35, 0xb2, 0x4c, 0xcb, 0x5f, 0xd8, 0x6a, 0xed, 0x79, 0xfe}}, + {[16]byte{0x00, 0x3b, 0x76, 0x4d, 0xec, 0xd7, 0x9a, 0xa1, 0xc5, 0xfe, 0xb3, 0x88, 0x29, 0x12, 0x5f, 0x64}, [16]byte{0x00, 0x97, 0x33, 0xa4, 0x66, 0xf1, 0x55, 0xc2, 0xcc, 0x5b, 0xff, 0x68, 0xaa, 0x3d, 0x99, 0x0e}}, + {[16]byte{0x00, 0x3c, 0x78, 0x44, 0xf0, 0xcc, 0x88, 0xb4, 0xfd, 0xc1, 0x85, 0xb9, 0x0d, 0x31, 0x75, 0x49}, [16]byte{0x00, 0xe7, 0xd3, 0x34, 0xbb, 0x5c, 0x68, 0x8f, 0x6b, 0x8c, 0xb8, 0x5f, 0xd0, 0x37, 0x03, 0xe4}}, + {[16]byte{0x00, 0x3d, 0x7a, 0x47, 0xf4, 0xc9, 0x8e, 0xb3, 0xf5, 0xc8, 0x8f, 0xb2, 0x01, 0x3c, 0x7b, 0x46}, [16]byte{0x00, 0xf7, 0xf3, 0x04, 0xfb, 0x0c, 0x08, 0xff, 0xeb, 0x1c, 0x18, 0xef, 0x10, 0xe7, 0xe3, 0x14}}, + {[16]byte{0x00, 0x3e, 0x7c, 0x42, 0xf8, 0xc6, 0x84, 0xba, 0xed, 0xd3, 0x91, 0xaf, 0x15, 0x2b, 0x69, 0x57}, [16]byte{0x00, 0xc7, 0x93, 0x54, 0x3b, 0xfc, 0xa8, 0x6f, 0x76, 0xb1, 0xe5, 0x22, 0x4d, 0x8a, 0xde, 0x19}}, 
+ {[16]byte{0x00, 0x3f, 0x7e, 0x41, 0xfc, 0xc3, 0x82, 0xbd, 0xe5, 0xda, 0x9b, 0xa4, 0x19, 0x26, 0x67, 0x58}, [16]byte{0x00, 0xd7, 0xb3, 0x64, 0x7b, 0xac, 0xc8, 0x1f, 0xf6, 0x21, 0x45, 0x92, 0x8d, 0x5a, 0x3e, 0xe9}}, + {[16]byte{0x00, 0x40, 0x80, 0xc0, 0x1d, 0x5d, 0x9d, 0xdd, 0x3a, 0x7a, 0xba, 0xfa, 0x27, 0x67, 0xa7, 0xe7}, [16]byte{0x00, 0x74, 0xe8, 0x9c, 0xcd, 0xb9, 0x25, 0x51, 0x87, 0xf3, 0x6f, 0x1b, 0x4a, 0x3e, 0xa2, 0xd6}}, + {[16]byte{0x00, 0x41, 0x82, 0xc3, 0x19, 0x58, 0x9b, 0xda, 0x32, 0x73, 0xb0, 0xf1, 0x2b, 0x6a, 0xa9, 0xe8}, [16]byte{0x00, 0x64, 0xc8, 0xac, 0x8d, 0xe9, 0x45, 0x21, 0x07, 0x63, 0xcf, 0xab, 0x8a, 0xee, 0x42, 0x26}}, + {[16]byte{0x00, 0x42, 0x84, 0xc6, 0x15, 0x57, 0x91, 0xd3, 0x2a, 0x68, 0xae, 0xec, 0x3f, 0x7d, 0xbb, 0xf9}, [16]byte{0x00, 0x54, 0xa8, 0xfc, 0x4d, 0x19, 0xe5, 0xb1, 0x9a, 0xce, 0x32, 0x66, 0xd7, 0x83, 0x7f, 0x2b}}, + {[16]byte{0x00, 0x43, 0x86, 0xc5, 0x11, 0x52, 0x97, 0xd4, 0x22, 0x61, 0xa4, 0xe7, 0x33, 0x70, 0xb5, 0xf6}, [16]byte{0x00, 0x44, 0x88, 0xcc, 0x0d, 0x49, 0x85, 0xc1, 0x1a, 0x5e, 0x92, 0xd6, 0x17, 0x53, 0x9f, 0xdb}}, + {[16]byte{0x00, 0x44, 0x88, 0xcc, 0x0d, 0x49, 0x85, 0xc1, 0x1a, 0x5e, 0x92, 0xd6, 0x17, 0x53, 0x9f, 0xdb}, [16]byte{0x00, 0x34, 0x68, 0x5c, 0xd0, 0xe4, 0xb8, 0x8c, 0xbd, 0x89, 0xd5, 0xe1, 0x6d, 0x59, 0x05, 0x31}}, + {[16]byte{0x00, 0x45, 0x8a, 0xcf, 0x09, 0x4c, 0x83, 0xc6, 0x12, 0x57, 0x98, 0xdd, 0x1b, 0x5e, 0x91, 0xd4}, [16]byte{0x00, 0x24, 0x48, 0x6c, 0x90, 0xb4, 0xd8, 0xfc, 0x3d, 0x19, 0x75, 0x51, 0xad, 0x89, 0xe5, 0xc1}}, + {[16]byte{0x00, 0x46, 0x8c, 0xca, 0x05, 0x43, 0x89, 0xcf, 0x0a, 0x4c, 0x86, 0xc0, 0x0f, 0x49, 0x83, 0xc5}, [16]byte{0x00, 0x14, 0x28, 0x3c, 0x50, 0x44, 0x78, 0x6c, 0xa0, 0xb4, 0x88, 0x9c, 0xf0, 0xe4, 0xd8, 0xcc}}, + {[16]byte{0x00, 0x47, 0x8e, 0xc9, 0x01, 0x46, 0x8f, 0xc8, 0x02, 0x45, 0x8c, 0xcb, 0x03, 0x44, 0x8d, 0xca}, [16]byte{0x00, 0x04, 0x08, 0x0c, 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x28, 0x2c, 0x30, 0x34, 0x38, 0x3c}}, + {[16]byte{0x00, 0x48, 0x90, 0xd8, 0x3d, 0x75, 0xad, 
0xe5, 0x7a, 0x32, 0xea, 0xa2, 0x47, 0x0f, 0xd7, 0x9f}, [16]byte{0x00, 0xf4, 0xf5, 0x01, 0xf7, 0x03, 0x02, 0xf6, 0xf3, 0x07, 0x06, 0xf2, 0x04, 0xf0, 0xf1, 0x05}}, + {[16]byte{0x00, 0x49, 0x92, 0xdb, 0x39, 0x70, 0xab, 0xe2, 0x72, 0x3b, 0xe0, 0xa9, 0x4b, 0x02, 0xd9, 0x90}, [16]byte{0x00, 0xe4, 0xd5, 0x31, 0xb7, 0x53, 0x62, 0x86, 0x73, 0x97, 0xa6, 0x42, 0xc4, 0x20, 0x11, 0xf5}}, + {[16]byte{0x00, 0x4a, 0x94, 0xde, 0x35, 0x7f, 0xa1, 0xeb, 0x6a, 0x20, 0xfe, 0xb4, 0x5f, 0x15, 0xcb, 0x81}, [16]byte{0x00, 0xd4, 0xb5, 0x61, 0x77, 0xa3, 0xc2, 0x16, 0xee, 0x3a, 0x5b, 0x8f, 0x99, 0x4d, 0x2c, 0xf8}}, + {[16]byte{0x00, 0x4b, 0x96, 0xdd, 0x31, 0x7a, 0xa7, 0xec, 0x62, 0x29, 0xf4, 0xbf, 0x53, 0x18, 0xc5, 0x8e}, [16]byte{0x00, 0xc4, 0x95, 0x51, 0x37, 0xf3, 0xa2, 0x66, 0x6e, 0xaa, 0xfb, 0x3f, 0x59, 0x9d, 0xcc, 0x08}}, + {[16]byte{0x00, 0x4c, 0x98, 0xd4, 0x2d, 0x61, 0xb5, 0xf9, 0x5a, 0x16, 0xc2, 0x8e, 0x77, 0x3b, 0xef, 0xa3}, [16]byte{0x00, 0xb4, 0x75, 0xc1, 0xea, 0x5e, 0x9f, 0x2b, 0xc9, 0x7d, 0xbc, 0x08, 0x23, 0x97, 0x56, 0xe2}}, + {[16]byte{0x00, 0x4d, 0x9a, 0xd7, 0x29, 0x64, 0xb3, 0xfe, 0x52, 0x1f, 0xc8, 0x85, 0x7b, 0x36, 0xe1, 0xac}, [16]byte{0x00, 0xa4, 0x55, 0xf1, 0xaa, 0x0e, 0xff, 0x5b, 0x49, 0xed, 0x1c, 0xb8, 0xe3, 0x47, 0xb6, 0x12}}, + {[16]byte{0x00, 0x4e, 0x9c, 0xd2, 0x25, 0x6b, 0xb9, 0xf7, 0x4a, 0x04, 0xd6, 0x98, 0x6f, 0x21, 0xf3, 0xbd}, [16]byte{0x00, 0x94, 0x35, 0xa1, 0x6a, 0xfe, 0x5f, 0xcb, 0xd4, 0x40, 0xe1, 0x75, 0xbe, 0x2a, 0x8b, 0x1f}}, + {[16]byte{0x00, 0x4f, 0x9e, 0xd1, 0x21, 0x6e, 0xbf, 0xf0, 0x42, 0x0d, 0xdc, 0x93, 0x63, 0x2c, 0xfd, 0xb2}, [16]byte{0x00, 0x84, 0x15, 0x91, 0x2a, 0xae, 0x3f, 0xbb, 0x54, 0xd0, 0x41, 0xc5, 0x7e, 0xfa, 0x6b, 0xef}}, + {[16]byte{0x00, 0x50, 0xa0, 0xf0, 0x5d, 0x0d, 0xfd, 0xad, 0xba, 0xea, 0x1a, 0x4a, 0xe7, 0xb7, 0x47, 0x17}, [16]byte{0x00, 0x69, 0xd2, 0xbb, 0xb9, 0xd0, 0x6b, 0x02, 0x6f, 0x06, 0xbd, 0xd4, 0xd6, 0xbf, 0x04, 0x6d}}, + {[16]byte{0x00, 0x51, 0xa2, 0xf3, 0x59, 0x08, 0xfb, 0xaa, 0xb2, 0xe3, 0x10, 0x41, 0xeb, 0xba, 0x49, 0x18}, 
[16]byte{0x00, 0x79, 0xf2, 0x8b, 0xf9, 0x80, 0x0b, 0x72, 0xef, 0x96, 0x1d, 0x64, 0x16, 0x6f, 0xe4, 0x9d}}, + {[16]byte{0x00, 0x52, 0xa4, 0xf6, 0x55, 0x07, 0xf1, 0xa3, 0xaa, 0xf8, 0x0e, 0x5c, 0xff, 0xad, 0x5b, 0x09}, [16]byte{0x00, 0x49, 0x92, 0xdb, 0x39, 0x70, 0xab, 0xe2, 0x72, 0x3b, 0xe0, 0xa9, 0x4b, 0x02, 0xd9, 0x90}}, + {[16]byte{0x00, 0x53, 0xa6, 0xf5, 0x51, 0x02, 0xf7, 0xa4, 0xa2, 0xf1, 0x04, 0x57, 0xf3, 0xa0, 0x55, 0x06}, [16]byte{0x00, 0x59, 0xb2, 0xeb, 0x79, 0x20, 0xcb, 0x92, 0xf2, 0xab, 0x40, 0x19, 0x8b, 0xd2, 0x39, 0x60}}, + {[16]byte{0x00, 0x54, 0xa8, 0xfc, 0x4d, 0x19, 0xe5, 0xb1, 0x9a, 0xce, 0x32, 0x66, 0xd7, 0x83, 0x7f, 0x2b}, [16]byte{0x00, 0x29, 0x52, 0x7b, 0xa4, 0x8d, 0xf6, 0xdf, 0x55, 0x7c, 0x07, 0x2e, 0xf1, 0xd8, 0xa3, 0x8a}}, + {[16]byte{0x00, 0x55, 0xaa, 0xff, 0x49, 0x1c, 0xe3, 0xb6, 0x92, 0xc7, 0x38, 0x6d, 0xdb, 0x8e, 0x71, 0x24}, [16]byte{0x00, 0x39, 0x72, 0x4b, 0xe4, 0xdd, 0x96, 0xaf, 0xd5, 0xec, 0xa7, 0x9e, 0x31, 0x08, 0x43, 0x7a}}, + {[16]byte{0x00, 0x56, 0xac, 0xfa, 0x45, 0x13, 0xe9, 0xbf, 0x8a, 0xdc, 0x26, 0x70, 0xcf, 0x99, 0x63, 0x35}, [16]byte{0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77}}, + {[16]byte{0x00, 0x57, 0xae, 0xf9, 0x41, 0x16, 0xef, 0xb8, 0x82, 0xd5, 0x2c, 0x7b, 0xc3, 0x94, 0x6d, 0x3a}, [16]byte{0x00, 0x19, 0x32, 0x2b, 0x64, 0x7d, 0x56, 0x4f, 0xc8, 0xd1, 0xfa, 0xe3, 0xac, 0xb5, 0x9e, 0x87}}, + {[16]byte{0x00, 0x58, 0xb0, 0xe8, 0x7d, 0x25, 0xcd, 0x95, 0xfa, 0xa2, 0x4a, 0x12, 0x87, 0xdf, 0x37, 0x6f}, [16]byte{0x00, 0xe9, 0xcf, 0x26, 0x83, 0x6a, 0x4c, 0xa5, 0x1b, 0xf2, 0xd4, 0x3d, 0x98, 0x71, 0x57, 0xbe}}, + {[16]byte{0x00, 0x59, 0xb2, 0xeb, 0x79, 0x20, 0xcb, 0x92, 0xf2, 0xab, 0x40, 0x19, 0x8b, 0xd2, 0x39, 0x60}, [16]byte{0x00, 0xf9, 0xef, 0x16, 0xc3, 0x3a, 0x2c, 0xd5, 0x9b, 0x62, 0x74, 0x8d, 0x58, 0xa1, 0xb7, 0x4e}}, + {[16]byte{0x00, 0x5a, 0xb4, 0xee, 0x75, 0x2f, 0xc1, 0x9b, 0xea, 0xb0, 0x5e, 0x04, 0x9f, 0xc5, 0x2b, 0x71}, [16]byte{0x00, 0xc9, 0x8f, 0x46, 0x03, 0xca, 0x8c, 
0x45, 0x06, 0xcf, 0x89, 0x40, 0x05, 0xcc, 0x8a, 0x43}}, + {[16]byte{0x00, 0x5b, 0xb6, 0xed, 0x71, 0x2a, 0xc7, 0x9c, 0xe2, 0xb9, 0x54, 0x0f, 0x93, 0xc8, 0x25, 0x7e}, [16]byte{0x00, 0xd9, 0xaf, 0x76, 0x43, 0x9a, 0xec, 0x35, 0x86, 0x5f, 0x29, 0xf0, 0xc5, 0x1c, 0x6a, 0xb3}}, + {[16]byte{0x00, 0x5c, 0xb8, 0xe4, 0x6d, 0x31, 0xd5, 0x89, 0xda, 0x86, 0x62, 0x3e, 0xb7, 0xeb, 0x0f, 0x53}, [16]byte{0x00, 0xa9, 0x4f, 0xe6, 0x9e, 0x37, 0xd1, 0x78, 0x21, 0x88, 0x6e, 0xc7, 0xbf, 0x16, 0xf0, 0x59}}, + {[16]byte{0x00, 0x5d, 0xba, 0xe7, 0x69, 0x34, 0xd3, 0x8e, 0xd2, 0x8f, 0x68, 0x35, 0xbb, 0xe6, 0x01, 0x5c}, [16]byte{0x00, 0xb9, 0x6f, 0xd6, 0xde, 0x67, 0xb1, 0x08, 0xa1, 0x18, 0xce, 0x77, 0x7f, 0xc6, 0x10, 0xa9}}, + {[16]byte{0x00, 0x5e, 0xbc, 0xe2, 0x65, 0x3b, 0xd9, 0x87, 0xca, 0x94, 0x76, 0x28, 0xaf, 0xf1, 0x13, 0x4d}, [16]byte{0x00, 0x89, 0x0f, 0x86, 0x1e, 0x97, 0x11, 0x98, 0x3c, 0xb5, 0x33, 0xba, 0x22, 0xab, 0x2d, 0xa4}}, + {[16]byte{0x00, 0x5f, 0xbe, 0xe1, 0x61, 0x3e, 0xdf, 0x80, 0xc2, 0x9d, 0x7c, 0x23, 0xa3, 0xfc, 0x1d, 0x42}, [16]byte{0x00, 0x99, 0x2f, 0xb6, 0x5e, 0xc7, 0x71, 0xe8, 0xbc, 0x25, 0x93, 0x0a, 0xe2, 0x7b, 0xcd, 0x54}}, + {[16]byte{0x00, 0x60, 0xc0, 0xa0, 0x9d, 0xfd, 0x5d, 0x3d, 0x27, 0x47, 0xe7, 0x87, 0xba, 0xda, 0x7a, 0x1a}, [16]byte{0x00, 0x4e, 0x9c, 0xd2, 0x25, 0x6b, 0xb9, 0xf7, 0x4a, 0x04, 0xd6, 0x98, 0x6f, 0x21, 0xf3, 0xbd}}, + {[16]byte{0x00, 0x61, 0xc2, 0xa3, 0x99, 0xf8, 0x5b, 0x3a, 0x2f, 0x4e, 0xed, 0x8c, 0xb6, 0xd7, 0x74, 0x15}, [16]byte{0x00, 0x5e, 0xbc, 0xe2, 0x65, 0x3b, 0xd9, 0x87, 0xca, 0x94, 0x76, 0x28, 0xaf, 0xf1, 0x13, 0x4d}}, + {[16]byte{0x00, 0x62, 0xc4, 0xa6, 0x95, 0xf7, 0x51, 0x33, 0x37, 0x55, 0xf3, 0x91, 0xa2, 0xc0, 0x66, 0x04}, [16]byte{0x00, 0x6e, 0xdc, 0xb2, 0xa5, 0xcb, 0x79, 0x17, 0x57, 0x39, 0x8b, 0xe5, 0xf2, 0x9c, 0x2e, 0x40}}, + {[16]byte{0x00, 0x63, 0xc6, 0xa5, 0x91, 0xf2, 0x57, 0x34, 0x3f, 0x5c, 0xf9, 0x9a, 0xae, 0xcd, 0x68, 0x0b}, [16]byte{0x00, 0x7e, 0xfc, 0x82, 0xe5, 0x9b, 0x19, 0x67, 0xd7, 0xa9, 0x2b, 0x55, 0x32, 0x4c, 0xce, 0xb0}}, 
+ {[16]byte{0x00, 0x64, 0xc8, 0xac, 0x8d, 0xe9, 0x45, 0x21, 0x07, 0x63, 0xcf, 0xab, 0x8a, 0xee, 0x42, 0x26}, [16]byte{0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a}}, + {[16]byte{0x00, 0x65, 0xca, 0xaf, 0x89, 0xec, 0x43, 0x26, 0x0f, 0x6a, 0xc5, 0xa0, 0x86, 0xe3, 0x4c, 0x29}, [16]byte{0x00, 0x1e, 0x3c, 0x22, 0x78, 0x66, 0x44, 0x5a, 0xf0, 0xee, 0xcc, 0xd2, 0x88, 0x96, 0xb4, 0xaa}}, + {[16]byte{0x00, 0x66, 0xcc, 0xaa, 0x85, 0xe3, 0x49, 0x2f, 0x17, 0x71, 0xdb, 0xbd, 0x92, 0xf4, 0x5e, 0x38}, [16]byte{0x00, 0x2e, 0x5c, 0x72, 0xb8, 0x96, 0xe4, 0xca, 0x6d, 0x43, 0x31, 0x1f, 0xd5, 0xfb, 0x89, 0xa7}}, + {[16]byte{0x00, 0x67, 0xce, 0xa9, 0x81, 0xe6, 0x4f, 0x28, 0x1f, 0x78, 0xd1, 0xb6, 0x9e, 0xf9, 0x50, 0x37}, [16]byte{0x00, 0x3e, 0x7c, 0x42, 0xf8, 0xc6, 0x84, 0xba, 0xed, 0xd3, 0x91, 0xaf, 0x15, 0x2b, 0x69, 0x57}}, + {[16]byte{0x00, 0x68, 0xd0, 0xb8, 0xbd, 0xd5, 0x6d, 0x05, 0x67, 0x0f, 0xb7, 0xdf, 0xda, 0xb2, 0x0a, 0x62}, [16]byte{0x00, 0xce, 0x81, 0x4f, 0x1f, 0xd1, 0x9e, 0x50, 0x3e, 0xf0, 0xbf, 0x71, 0x21, 0xef, 0xa0, 0x6e}}, + {[16]byte{0x00, 0x69, 0xd2, 0xbb, 0xb9, 0xd0, 0x6b, 0x02, 0x6f, 0x06, 0xbd, 0xd4, 0xd6, 0xbf, 0x04, 0x6d}, [16]byte{0x00, 0xde, 0xa1, 0x7f, 0x5f, 0x81, 0xfe, 0x20, 0xbe, 0x60, 0x1f, 0xc1, 0xe1, 0x3f, 0x40, 0x9e}}, + {[16]byte{0x00, 0x6a, 0xd4, 0xbe, 0xb5, 0xdf, 0x61, 0x0b, 0x77, 0x1d, 0xa3, 0xc9, 0xc2, 0xa8, 0x16, 0x7c}, [16]byte{0x00, 0xee, 0xc1, 0x2f, 0x9f, 0x71, 0x5e, 0xb0, 0x23, 0xcd, 0xe2, 0x0c, 0xbc, 0x52, 0x7d, 0x93}}, + {[16]byte{0x00, 0x6b, 0xd6, 0xbd, 0xb1, 0xda, 0x67, 0x0c, 0x7f, 0x14, 0xa9, 0xc2, 0xce, 0xa5, 0x18, 0x73}, [16]byte{0x00, 0xfe, 0xe1, 0x1f, 0xdf, 0x21, 0x3e, 0xc0, 0xa3, 0x5d, 0x42, 0xbc, 0x7c, 0x82, 0x9d, 0x63}}, + {[16]byte{0x00, 0x6c, 0xd8, 0xb4, 0xad, 0xc1, 0x75, 0x19, 0x47, 0x2b, 0x9f, 0xf3, 0xea, 0x86, 0x32, 0x5e}, [16]byte{0x00, 0x8e, 0x01, 0x8f, 0x02, 0x8c, 0x03, 0x8d, 0x04, 0x8a, 0x05, 0x8b, 0x06, 0x88, 0x07, 0x89}}, + {[16]byte{0x00, 0x6d, 0xda, 0xb7, 0xa9, 0xc4, 0x73, 
0x1e, 0x4f, 0x22, 0x95, 0xf8, 0xe6, 0x8b, 0x3c, 0x51}, [16]byte{0x00, 0x9e, 0x21, 0xbf, 0x42, 0xdc, 0x63, 0xfd, 0x84, 0x1a, 0xa5, 0x3b, 0xc6, 0x58, 0xe7, 0x79}}, + {[16]byte{0x00, 0x6e, 0xdc, 0xb2, 0xa5, 0xcb, 0x79, 0x17, 0x57, 0x39, 0x8b, 0xe5, 0xf2, 0x9c, 0x2e, 0x40}, [16]byte{0x00, 0xae, 0x41, 0xef, 0x82, 0x2c, 0xc3, 0x6d, 0x19, 0xb7, 0x58, 0xf6, 0x9b, 0x35, 0xda, 0x74}}, + {[16]byte{0x00, 0x6f, 0xde, 0xb1, 0xa1, 0xce, 0x7f, 0x10, 0x5f, 0x30, 0x81, 0xee, 0xfe, 0x91, 0x20, 0x4f}, [16]byte{0x00, 0xbe, 0x61, 0xdf, 0xc2, 0x7c, 0xa3, 0x1d, 0x99, 0x27, 0xf8, 0x46, 0x5b, 0xe5, 0x3a, 0x84}}, + {[16]byte{0x00, 0x70, 0xe0, 0x90, 0xdd, 0xad, 0x3d, 0x4d, 0xa7, 0xd7, 0x47, 0x37, 0x7a, 0x0a, 0x9a, 0xea}, [16]byte{0x00, 0x53, 0xa6, 0xf5, 0x51, 0x02, 0xf7, 0xa4, 0xa2, 0xf1, 0x04, 0x57, 0xf3, 0xa0, 0x55, 0x06}}, + {[16]byte{0x00, 0x71, 0xe2, 0x93, 0xd9, 0xa8, 0x3b, 0x4a, 0xaf, 0xde, 0x4d, 0x3c, 0x76, 0x07, 0x94, 0xe5}, [16]byte{0x00, 0x43, 0x86, 0xc5, 0x11, 0x52, 0x97, 0xd4, 0x22, 0x61, 0xa4, 0xe7, 0x33, 0x70, 0xb5, 0xf6}}, + {[16]byte{0x00, 0x72, 0xe4, 0x96, 0xd5, 0xa7, 0x31, 0x43, 0xb7, 0xc5, 0x53, 0x21, 0x62, 0x10, 0x86, 0xf4}, [16]byte{0x00, 0x73, 0xe6, 0x95, 0xd1, 0xa2, 0x37, 0x44, 0xbf, 0xcc, 0x59, 0x2a, 0x6e, 0x1d, 0x88, 0xfb}}, + {[16]byte{0x00, 0x73, 0xe6, 0x95, 0xd1, 0xa2, 0x37, 0x44, 0xbf, 0xcc, 0x59, 0x2a, 0x6e, 0x1d, 0x88, 0xfb}, [16]byte{0x00, 0x63, 0xc6, 0xa5, 0x91, 0xf2, 0x57, 0x34, 0x3f, 0x5c, 0xf9, 0x9a, 0xae, 0xcd, 0x68, 0x0b}}, + {[16]byte{0x00, 0x74, 0xe8, 0x9c, 0xcd, 0xb9, 0x25, 0x51, 0x87, 0xf3, 0x6f, 0x1b, 0x4a, 0x3e, 0xa2, 0xd6}, [16]byte{0x00, 0x13, 0x26, 0x35, 0x4c, 0x5f, 0x6a, 0x79, 0x98, 0x8b, 0xbe, 0xad, 0xd4, 0xc7, 0xf2, 0xe1}}, + {[16]byte{0x00, 0x75, 0xea, 0x9f, 0xc9, 0xbc, 0x23, 0x56, 0x8f, 0xfa, 0x65, 0x10, 0x46, 0x33, 0xac, 0xd9}, [16]byte{0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11}}, + {[16]byte{0x00, 0x76, 0xec, 0x9a, 0xc5, 0xb3, 0x29, 0x5f, 0x97, 0xe1, 0x7b, 0x0d, 0x52, 0x24, 0xbe, 0xc8}, 
[16]byte{0x00, 0x33, 0x66, 0x55, 0xcc, 0xff, 0xaa, 0x99, 0x85, 0xb6, 0xe3, 0xd0, 0x49, 0x7a, 0x2f, 0x1c}}, + {[16]byte{0x00, 0x77, 0xee, 0x99, 0xc1, 0xb6, 0x2f, 0x58, 0x9f, 0xe8, 0x71, 0x06, 0x5e, 0x29, 0xb0, 0xc7}, [16]byte{0x00, 0x23, 0x46, 0x65, 0x8c, 0xaf, 0xca, 0xe9, 0x05, 0x26, 0x43, 0x60, 0x89, 0xaa, 0xcf, 0xec}}, + {[16]byte{0x00, 0x78, 0xf0, 0x88, 0xfd, 0x85, 0x0d, 0x75, 0xe7, 0x9f, 0x17, 0x6f, 0x1a, 0x62, 0xea, 0x92}, [16]byte{0x00, 0xd3, 0xbb, 0x68, 0x6b, 0xb8, 0xd0, 0x03, 0xd6, 0x05, 0x6d, 0xbe, 0xbd, 0x6e, 0x06, 0xd5}}, + {[16]byte{0x00, 0x79, 0xf2, 0x8b, 0xf9, 0x80, 0x0b, 0x72, 0xef, 0x96, 0x1d, 0x64, 0x16, 0x6f, 0xe4, 0x9d}, [16]byte{0x00, 0xc3, 0x9b, 0x58, 0x2b, 0xe8, 0xb0, 0x73, 0x56, 0x95, 0xcd, 0x0e, 0x7d, 0xbe, 0xe6, 0x25}}, + {[16]byte{0x00, 0x7a, 0xf4, 0x8e, 0xf5, 0x8f, 0x01, 0x7b, 0xf7, 0x8d, 0x03, 0x79, 0x02, 0x78, 0xf6, 0x8c}, [16]byte{0x00, 0xf3, 0xfb, 0x08, 0xeb, 0x18, 0x10, 0xe3, 0xcb, 0x38, 0x30, 0xc3, 0x20, 0xd3, 0xdb, 0x28}}, + {[16]byte{0x00, 0x7b, 0xf6, 0x8d, 0xf1, 0x8a, 0x07, 0x7c, 0xff, 0x84, 0x09, 0x72, 0x0e, 0x75, 0xf8, 0x83}, [16]byte{0x00, 0xe3, 0xdb, 0x38, 0xab, 0x48, 0x70, 0x93, 0x4b, 0xa8, 0x90, 0x73, 0xe0, 0x03, 0x3b, 0xd8}}, + {[16]byte{0x00, 0x7c, 0xf8, 0x84, 0xed, 0x91, 0x15, 0x69, 0xc7, 0xbb, 0x3f, 0x43, 0x2a, 0x56, 0xd2, 0xae}, [16]byte{0x00, 0x93, 0x3b, 0xa8, 0x76, 0xe5, 0x4d, 0xde, 0xec, 0x7f, 0xd7, 0x44, 0x9a, 0x09, 0xa1, 0x32}}, + {[16]byte{0x00, 0x7d, 0xfa, 0x87, 0xe9, 0x94, 0x13, 0x6e, 0xcf, 0xb2, 0x35, 0x48, 0x26, 0x5b, 0xdc, 0xa1}, [16]byte{0x00, 0x83, 0x1b, 0x98, 0x36, 0xb5, 0x2d, 0xae, 0x6c, 0xef, 0x77, 0xf4, 0x5a, 0xd9, 0x41, 0xc2}}, + {[16]byte{0x00, 0x7e, 0xfc, 0x82, 0xe5, 0x9b, 0x19, 0x67, 0xd7, 0xa9, 0x2b, 0x55, 0x32, 0x4c, 0xce, 0xb0}, [16]byte{0x00, 0xb3, 0x7b, 0xc8, 0xf6, 0x45, 0x8d, 0x3e, 0xf1, 0x42, 0x8a, 0x39, 0x07, 0xb4, 0x7c, 0xcf}}, + {[16]byte{0x00, 0x7f, 0xfe, 0x81, 0xe1, 0x9e, 0x1f, 0x60, 0xdf, 0xa0, 0x21, 0x5e, 0x3e, 0x41, 0xc0, 0xbf}, [16]byte{0x00, 0xa3, 0x5b, 0xf8, 0xb6, 0x15, 0xed, 
0x4e, 0x71, 0xd2, 0x2a, 0x89, 0xc7, 0x64, 0x9c, 0x3f}}, + {[16]byte{0x00, 0x80, 0x1d, 0x9d, 0x3a, 0xba, 0x27, 0xa7, 0x74, 0xf4, 0x69, 0xe9, 0x4e, 0xce, 0x53, 0xd3}, [16]byte{0x00, 0xe8, 0xcd, 0x25, 0x87, 0x6f, 0x4a, 0xa2, 0x13, 0xfb, 0xde, 0x36, 0x94, 0x7c, 0x59, 0xb1}}, + {[16]byte{0x00, 0x81, 0x1f, 0x9e, 0x3e, 0xbf, 0x21, 0xa0, 0x7c, 0xfd, 0x63, 0xe2, 0x42, 0xc3, 0x5d, 0xdc}, [16]byte{0x00, 0xf8, 0xed, 0x15, 0xc7, 0x3f, 0x2a, 0xd2, 0x93, 0x6b, 0x7e, 0x86, 0x54, 0xac, 0xb9, 0x41}}, + {[16]byte{0x00, 0x82, 0x19, 0x9b, 0x32, 0xb0, 0x2b, 0xa9, 0x64, 0xe6, 0x7d, 0xff, 0x56, 0xd4, 0x4f, 0xcd}, [16]byte{0x00, 0xc8, 0x8d, 0x45, 0x07, 0xcf, 0x8a, 0x42, 0x0e, 0xc6, 0x83, 0x4b, 0x09, 0xc1, 0x84, 0x4c}}, + {[16]byte{0x00, 0x83, 0x1b, 0x98, 0x36, 0xb5, 0x2d, 0xae, 0x6c, 0xef, 0x77, 0xf4, 0x5a, 0xd9, 0x41, 0xc2}, [16]byte{0x00, 0xd8, 0xad, 0x75, 0x47, 0x9f, 0xea, 0x32, 0x8e, 0x56, 0x23, 0xfb, 0xc9, 0x11, 0x64, 0xbc}}, + {[16]byte{0x00, 0x84, 0x15, 0x91, 0x2a, 0xae, 0x3f, 0xbb, 0x54, 0xd0, 0x41, 0xc5, 0x7e, 0xfa, 0x6b, 0xef}, [16]byte{0x00, 0xa8, 0x4d, 0xe5, 0x9a, 0x32, 0xd7, 0x7f, 0x29, 0x81, 0x64, 0xcc, 0xb3, 0x1b, 0xfe, 0x56}}, + {[16]byte{0x00, 0x85, 0x17, 0x92, 0x2e, 0xab, 0x39, 0xbc, 0x5c, 0xd9, 0x4b, 0xce, 0x72, 0xf7, 0x65, 0xe0}, [16]byte{0x00, 0xb8, 0x6d, 0xd5, 0xda, 0x62, 0xb7, 0x0f, 0xa9, 0x11, 0xc4, 0x7c, 0x73, 0xcb, 0x1e, 0xa6}}, + {[16]byte{0x00, 0x86, 0x11, 0x97, 0x22, 0xa4, 0x33, 0xb5, 0x44, 0xc2, 0x55, 0xd3, 0x66, 0xe0, 0x77, 0xf1}, [16]byte{0x00, 0x88, 0x0d, 0x85, 0x1a, 0x92, 0x17, 0x9f, 0x34, 0xbc, 0x39, 0xb1, 0x2e, 0xa6, 0x23, 0xab}}, + {[16]byte{0x00, 0x87, 0x13, 0x94, 0x26, 0xa1, 0x35, 0xb2, 0x4c, 0xcb, 0x5f, 0xd8, 0x6a, 0xed, 0x79, 0xfe}, [16]byte{0x00, 0x98, 0x2d, 0xb5, 0x5a, 0xc2, 0x77, 0xef, 0xb4, 0x2c, 0x99, 0x01, 0xee, 0x76, 0xc3, 0x5b}}, + {[16]byte{0x00, 0x88, 0x0d, 0x85, 0x1a, 0x92, 0x17, 0x9f, 0x34, 0xbc, 0x39, 0xb1, 0x2e, 0xa6, 0x23, 0xab}, [16]byte{0x00, 0x68, 0xd0, 0xb8, 0xbd, 0xd5, 0x6d, 0x05, 0x67, 0x0f, 0xb7, 0xdf, 0xda, 0xb2, 0x0a, 0x62}}, 
+ {[16]byte{0x00, 0x89, 0x0f, 0x86, 0x1e, 0x97, 0x11, 0x98, 0x3c, 0xb5, 0x33, 0xba, 0x22, 0xab, 0x2d, 0xa4}, [16]byte{0x00, 0x78, 0xf0, 0x88, 0xfd, 0x85, 0x0d, 0x75, 0xe7, 0x9f, 0x17, 0x6f, 0x1a, 0x62, 0xea, 0x92}}, + {[16]byte{0x00, 0x8a, 0x09, 0x83, 0x12, 0x98, 0x1b, 0x91, 0x24, 0xae, 0x2d, 0xa7, 0x36, 0xbc, 0x3f, 0xb5}, [16]byte{0x00, 0x48, 0x90, 0xd8, 0x3d, 0x75, 0xad, 0xe5, 0x7a, 0x32, 0xea, 0xa2, 0x47, 0x0f, 0xd7, 0x9f}}, + {[16]byte{0x00, 0x8b, 0x0b, 0x80, 0x16, 0x9d, 0x1d, 0x96, 0x2c, 0xa7, 0x27, 0xac, 0x3a, 0xb1, 0x31, 0xba}, [16]byte{0x00, 0x58, 0xb0, 0xe8, 0x7d, 0x25, 0xcd, 0x95, 0xfa, 0xa2, 0x4a, 0x12, 0x87, 0xdf, 0x37, 0x6f}}, + {[16]byte{0x00, 0x8c, 0x05, 0x89, 0x0a, 0x86, 0x0f, 0x83, 0x14, 0x98, 0x11, 0x9d, 0x1e, 0x92, 0x1b, 0x97}, [16]byte{0x00, 0x28, 0x50, 0x78, 0xa0, 0x88, 0xf0, 0xd8, 0x5d, 0x75, 0x0d, 0x25, 0xfd, 0xd5, 0xad, 0x85}}, + {[16]byte{0x00, 0x8d, 0x07, 0x8a, 0x0e, 0x83, 0x09, 0x84, 0x1c, 0x91, 0x1b, 0x96, 0x12, 0x9f, 0x15, 0x98}, [16]byte{0x00, 0x38, 0x70, 0x48, 0xe0, 0xd8, 0x90, 0xa8, 0xdd, 0xe5, 0xad, 0x95, 0x3d, 0x05, 0x4d, 0x75}}, + {[16]byte{0x00, 0x8e, 0x01, 0x8f, 0x02, 0x8c, 0x03, 0x8d, 0x04, 0x8a, 0x05, 0x8b, 0x06, 0x88, 0x07, 0x89}, [16]byte{0x00, 0x08, 0x10, 0x18, 0x20, 0x28, 0x30, 0x38, 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78}}, + {[16]byte{0x00, 0x8f, 0x03, 0x8c, 0x06, 0x89, 0x05, 0x8a, 0x0c, 0x83, 0x0f, 0x80, 0x0a, 0x85, 0x09, 0x86}, [16]byte{0x00, 0x18, 0x30, 0x28, 0x60, 0x78, 0x50, 0x48, 0xc0, 0xd8, 0xf0, 0xe8, 0xa0, 0xb8, 0x90, 0x88}}, + {[16]byte{0x00, 0x90, 0x3d, 0xad, 0x7a, 0xea, 0x47, 0xd7, 0xf4, 0x64, 0xc9, 0x59, 0x8e, 0x1e, 0xb3, 0x23}, [16]byte{0x00, 0xf5, 0xf7, 0x02, 0xf3, 0x06, 0x04, 0xf1, 0xfb, 0x0e, 0x0c, 0xf9, 0x08, 0xfd, 0xff, 0x0a}}, + {[16]byte{0x00, 0x91, 0x3f, 0xae, 0x7e, 0xef, 0x41, 0xd0, 0xfc, 0x6d, 0xc3, 0x52, 0x82, 0x13, 0xbd, 0x2c}, [16]byte{0x00, 0xe5, 0xd7, 0x32, 0xb3, 0x56, 0x64, 0x81, 0x7b, 0x9e, 0xac, 0x49, 0xc8, 0x2d, 0x1f, 0xfa}}, + {[16]byte{0x00, 0x92, 0x39, 0xab, 0x72, 0xe0, 0x4b, 
0xd9, 0xe4, 0x76, 0xdd, 0x4f, 0x96, 0x04, 0xaf, 0x3d}, [16]byte{0x00, 0xd5, 0xb7, 0x62, 0x73, 0xa6, 0xc4, 0x11, 0xe6, 0x33, 0x51, 0x84, 0x95, 0x40, 0x22, 0xf7}}, + {[16]byte{0x00, 0x93, 0x3b, 0xa8, 0x76, 0xe5, 0x4d, 0xde, 0xec, 0x7f, 0xd7, 0x44, 0x9a, 0x09, 0xa1, 0x32}, [16]byte{0x00, 0xc5, 0x97, 0x52, 0x33, 0xf6, 0xa4, 0x61, 0x66, 0xa3, 0xf1, 0x34, 0x55, 0x90, 0xc2, 0x07}}, + {[16]byte{0x00, 0x94, 0x35, 0xa1, 0x6a, 0xfe, 0x5f, 0xcb, 0xd4, 0x40, 0xe1, 0x75, 0xbe, 0x2a, 0x8b, 0x1f}, [16]byte{0x00, 0xb5, 0x77, 0xc2, 0xee, 0x5b, 0x99, 0x2c, 0xc1, 0x74, 0xb6, 0x03, 0x2f, 0x9a, 0x58, 0xed}}, + {[16]byte{0x00, 0x95, 0x37, 0xa2, 0x6e, 0xfb, 0x59, 0xcc, 0xdc, 0x49, 0xeb, 0x7e, 0xb2, 0x27, 0x85, 0x10}, [16]byte{0x00, 0xa5, 0x57, 0xf2, 0xae, 0x0b, 0xf9, 0x5c, 0x41, 0xe4, 0x16, 0xb3, 0xef, 0x4a, 0xb8, 0x1d}}, + {[16]byte{0x00, 0x96, 0x31, 0xa7, 0x62, 0xf4, 0x53, 0xc5, 0xc4, 0x52, 0xf5, 0x63, 0xa6, 0x30, 0x97, 0x01}, [16]byte{0x00, 0x95, 0x37, 0xa2, 0x6e, 0xfb, 0x59, 0xcc, 0xdc, 0x49, 0xeb, 0x7e, 0xb2, 0x27, 0x85, 0x10}}, + {[16]byte{0x00, 0x97, 0x33, 0xa4, 0x66, 0xf1, 0x55, 0xc2, 0xcc, 0x5b, 0xff, 0x68, 0xaa, 0x3d, 0x99, 0x0e}, [16]byte{0x00, 0x85, 0x17, 0x92, 0x2e, 0xab, 0x39, 0xbc, 0x5c, 0xd9, 0x4b, 0xce, 0x72, 0xf7, 0x65, 0xe0}}, + {[16]byte{0x00, 0x98, 0x2d, 0xb5, 0x5a, 0xc2, 0x77, 0xef, 0xb4, 0x2c, 0x99, 0x01, 0xee, 0x76, 0xc3, 0x5b}, [16]byte{0x00, 0x75, 0xea, 0x9f, 0xc9, 0xbc, 0x23, 0x56, 0x8f, 0xfa, 0x65, 0x10, 0x46, 0x33, 0xac, 0xd9}}, + {[16]byte{0x00, 0x99, 0x2f, 0xb6, 0x5e, 0xc7, 0x71, 0xe8, 0xbc, 0x25, 0x93, 0x0a, 0xe2, 0x7b, 0xcd, 0x54}, [16]byte{0x00, 0x65, 0xca, 0xaf, 0x89, 0xec, 0x43, 0x26, 0x0f, 0x6a, 0xc5, 0xa0, 0x86, 0xe3, 0x4c, 0x29}}, + {[16]byte{0x00, 0x9a, 0x29, 0xb3, 0x52, 0xc8, 0x7b, 0xe1, 0xa4, 0x3e, 0x8d, 0x17, 0xf6, 0x6c, 0xdf, 0x45}, [16]byte{0x00, 0x55, 0xaa, 0xff, 0x49, 0x1c, 0xe3, 0xb6, 0x92, 0xc7, 0x38, 0x6d, 0xdb, 0x8e, 0x71, 0x24}}, + {[16]byte{0x00, 0x9b, 0x2b, 0xb0, 0x56, 0xcd, 0x7d, 0xe6, 0xac, 0x37, 0x87, 0x1c, 0xfa, 0x61, 0xd1, 0x4a}, 
[16]byte{0x00, 0x45, 0x8a, 0xcf, 0x09, 0x4c, 0x83, 0xc6, 0x12, 0x57, 0x98, 0xdd, 0x1b, 0x5e, 0x91, 0xd4}}, + {[16]byte{0x00, 0x9c, 0x25, 0xb9, 0x4a, 0xd6, 0x6f, 0xf3, 0x94, 0x08, 0xb1, 0x2d, 0xde, 0x42, 0xfb, 0x67}, [16]byte{0x00, 0x35, 0x6a, 0x5f, 0xd4, 0xe1, 0xbe, 0x8b, 0xb5, 0x80, 0xdf, 0xea, 0x61, 0x54, 0x0b, 0x3e}}, + {[16]byte{0x00, 0x9d, 0x27, 0xba, 0x4e, 0xd3, 0x69, 0xf4, 0x9c, 0x01, 0xbb, 0x26, 0xd2, 0x4f, 0xf5, 0x68}, [16]byte{0x00, 0x25, 0x4a, 0x6f, 0x94, 0xb1, 0xde, 0xfb, 0x35, 0x10, 0x7f, 0x5a, 0xa1, 0x84, 0xeb, 0xce}}, + {[16]byte{0x00, 0x9e, 0x21, 0xbf, 0x42, 0xdc, 0x63, 0xfd, 0x84, 0x1a, 0xa5, 0x3b, 0xc6, 0x58, 0xe7, 0x79}, [16]byte{0x00, 0x15, 0x2a, 0x3f, 0x54, 0x41, 0x7e, 0x6b, 0xa8, 0xbd, 0x82, 0x97, 0xfc, 0xe9, 0xd6, 0xc3}}, + {[16]byte{0x00, 0x9f, 0x23, 0xbc, 0x46, 0xd9, 0x65, 0xfa, 0x8c, 0x13, 0xaf, 0x30, 0xca, 0x55, 0xe9, 0x76}, [16]byte{0x00, 0x05, 0x0a, 0x0f, 0x14, 0x11, 0x1e, 0x1b, 0x28, 0x2d, 0x22, 0x27, 0x3c, 0x39, 0x36, 0x33}}, + {[16]byte{0x00, 0xa0, 0x5d, 0xfd, 0xba, 0x1a, 0xe7, 0x47, 0x69, 0xc9, 0x34, 0x94, 0xd3, 0x73, 0x8e, 0x2e}, [16]byte{0x00, 0xd2, 0xb9, 0x6b, 0x6f, 0xbd, 0xd6, 0x04, 0xde, 0x0c, 0x67, 0xb5, 0xb1, 0x63, 0x08, 0xda}}, + {[16]byte{0x00, 0xa1, 0x5f, 0xfe, 0xbe, 0x1f, 0xe1, 0x40, 0x61, 0xc0, 0x3e, 0x9f, 0xdf, 0x7e, 0x80, 0x21}, [16]byte{0x00, 0xc2, 0x99, 0x5b, 0x2f, 0xed, 0xb6, 0x74, 0x5e, 0x9c, 0xc7, 0x05, 0x71, 0xb3, 0xe8, 0x2a}}, + {[16]byte{0x00, 0xa2, 0x59, 0xfb, 0xb2, 0x10, 0xeb, 0x49, 0x79, 0xdb, 0x20, 0x82, 0xcb, 0x69, 0x92, 0x30}, [16]byte{0x00, 0xf2, 0xf9, 0x0b, 0xef, 0x1d, 0x16, 0xe4, 0xc3, 0x31, 0x3a, 0xc8, 0x2c, 0xde, 0xd5, 0x27}}, + {[16]byte{0x00, 0xa3, 0x5b, 0xf8, 0xb6, 0x15, 0xed, 0x4e, 0x71, 0xd2, 0x2a, 0x89, 0xc7, 0x64, 0x9c, 0x3f}, [16]byte{0x00, 0xe2, 0xd9, 0x3b, 0xaf, 0x4d, 0x76, 0x94, 0x43, 0xa1, 0x9a, 0x78, 0xec, 0x0e, 0x35, 0xd7}}, + {[16]byte{0x00, 0xa4, 0x55, 0xf1, 0xaa, 0x0e, 0xff, 0x5b, 0x49, 0xed, 0x1c, 0xb8, 0xe3, 0x47, 0xb6, 0x12}, [16]byte{0x00, 0x92, 0x39, 0xab, 0x72, 0xe0, 0x4b, 
0xd9, 0xe4, 0x76, 0xdd, 0x4f, 0x96, 0x04, 0xaf, 0x3d}}, + {[16]byte{0x00, 0xa5, 0x57, 0xf2, 0xae, 0x0b, 0xf9, 0x5c, 0x41, 0xe4, 0x16, 0xb3, 0xef, 0x4a, 0xb8, 0x1d}, [16]byte{0x00, 0x82, 0x19, 0x9b, 0x32, 0xb0, 0x2b, 0xa9, 0x64, 0xe6, 0x7d, 0xff, 0x56, 0xd4, 0x4f, 0xcd}}, + {[16]byte{0x00, 0xa6, 0x51, 0xf7, 0xa2, 0x04, 0xf3, 0x55, 0x59, 0xff, 0x08, 0xae, 0xfb, 0x5d, 0xaa, 0x0c}, [16]byte{0x00, 0xb2, 0x79, 0xcb, 0xf2, 0x40, 0x8b, 0x39, 0xf9, 0x4b, 0x80, 0x32, 0x0b, 0xb9, 0x72, 0xc0}}, + {[16]byte{0x00, 0xa7, 0x53, 0xf4, 0xa6, 0x01, 0xf5, 0x52, 0x51, 0xf6, 0x02, 0xa5, 0xf7, 0x50, 0xa4, 0x03}, [16]byte{0x00, 0xa2, 0x59, 0xfb, 0xb2, 0x10, 0xeb, 0x49, 0x79, 0xdb, 0x20, 0x82, 0xcb, 0x69, 0x92, 0x30}}, + {[16]byte{0x00, 0xa8, 0x4d, 0xe5, 0x9a, 0x32, 0xd7, 0x7f, 0x29, 0x81, 0x64, 0xcc, 0xb3, 0x1b, 0xfe, 0x56}, [16]byte{0x00, 0x52, 0xa4, 0xf6, 0x55, 0x07, 0xf1, 0xa3, 0xaa, 0xf8, 0x0e, 0x5c, 0xff, 0xad, 0x5b, 0x09}}, + {[16]byte{0x00, 0xa9, 0x4f, 0xe6, 0x9e, 0x37, 0xd1, 0x78, 0x21, 0x88, 0x6e, 0xc7, 0xbf, 0x16, 0xf0, 0x59}, [16]byte{0x00, 0x42, 0x84, 0xc6, 0x15, 0x57, 0x91, 0xd3, 0x2a, 0x68, 0xae, 0xec, 0x3f, 0x7d, 0xbb, 0xf9}}, + {[16]byte{0x00, 0xaa, 0x49, 0xe3, 0x92, 0x38, 0xdb, 0x71, 0x39, 0x93, 0x70, 0xda, 0xab, 0x01, 0xe2, 0x48}, [16]byte{0x00, 0x72, 0xe4, 0x96, 0xd5, 0xa7, 0x31, 0x43, 0xb7, 0xc5, 0x53, 0x21, 0x62, 0x10, 0x86, 0xf4}}, + {[16]byte{0x00, 0xab, 0x4b, 0xe0, 0x96, 0x3d, 0xdd, 0x76, 0x31, 0x9a, 0x7a, 0xd1, 0xa7, 0x0c, 0xec, 0x47}, [16]byte{0x00, 0x62, 0xc4, 0xa6, 0x95, 0xf7, 0x51, 0x33, 0x37, 0x55, 0xf3, 0x91, 0xa2, 0xc0, 0x66, 0x04}}, + {[16]byte{0x00, 0xac, 0x45, 0xe9, 0x8a, 0x26, 0xcf, 0x63, 0x09, 0xa5, 0x4c, 0xe0, 0x83, 0x2f, 0xc6, 0x6a}, [16]byte{0x00, 0x12, 0x24, 0x36, 0x48, 0x5a, 0x6c, 0x7e, 0x90, 0x82, 0xb4, 0xa6, 0xd8, 0xca, 0xfc, 0xee}}, + {[16]byte{0x00, 0xad, 0x47, 0xea, 0x8e, 0x23, 0xc9, 0x64, 0x01, 0xac, 0x46, 0xeb, 0x8f, 0x22, 0xc8, 0x65}, [16]byte{0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e}}, 
+ {[16]byte{0x00, 0xae, 0x41, 0xef, 0x82, 0x2c, 0xc3, 0x6d, 0x19, 0xb7, 0x58, 0xf6, 0x9b, 0x35, 0xda, 0x74}, [16]byte{0x00, 0x32, 0x64, 0x56, 0xc8, 0xfa, 0xac, 0x9e, 0x8d, 0xbf, 0xe9, 0xdb, 0x45, 0x77, 0x21, 0x13}}, + {[16]byte{0x00, 0xaf, 0x43, 0xec, 0x86, 0x29, 0xc5, 0x6a, 0x11, 0xbe, 0x52, 0xfd, 0x97, 0x38, 0xd4, 0x7b}, [16]byte{0x00, 0x22, 0x44, 0x66, 0x88, 0xaa, 0xcc, 0xee, 0x0d, 0x2f, 0x49, 0x6b, 0x85, 0xa7, 0xc1, 0xe3}}, + {[16]byte{0x00, 0xb0, 0x7d, 0xcd, 0xfa, 0x4a, 0x87, 0x37, 0xe9, 0x59, 0x94, 0x24, 0x13, 0xa3, 0x6e, 0xde}, [16]byte{0x00, 0xcf, 0x83, 0x4c, 0x1b, 0xd4, 0x98, 0x57, 0x36, 0xf9, 0xb5, 0x7a, 0x2d, 0xe2, 0xae, 0x61}}, + {[16]byte{0x00, 0xb1, 0x7f, 0xce, 0xfe, 0x4f, 0x81, 0x30, 0xe1, 0x50, 0x9e, 0x2f, 0x1f, 0xae, 0x60, 0xd1}, [16]byte{0x00, 0xdf, 0xa3, 0x7c, 0x5b, 0x84, 0xf8, 0x27, 0xb6, 0x69, 0x15, 0xca, 0xed, 0x32, 0x4e, 0x91}}, + {[16]byte{0x00, 0xb2, 0x79, 0xcb, 0xf2, 0x40, 0x8b, 0x39, 0xf9, 0x4b, 0x80, 0x32, 0x0b, 0xb9, 0x72, 0xc0}, [16]byte{0x00, 0xef, 0xc3, 0x2c, 0x9b, 0x74, 0x58, 0xb7, 0x2b, 0xc4, 0xe8, 0x07, 0xb0, 0x5f, 0x73, 0x9c}}, + {[16]byte{0x00, 0xb3, 0x7b, 0xc8, 0xf6, 0x45, 0x8d, 0x3e, 0xf1, 0x42, 0x8a, 0x39, 0x07, 0xb4, 0x7c, 0xcf}, [16]byte{0x00, 0xff, 0xe3, 0x1c, 0xdb, 0x24, 0x38, 0xc7, 0xab, 0x54, 0x48, 0xb7, 0x70, 0x8f, 0x93, 0x6c}}, + {[16]byte{0x00, 0xb4, 0x75, 0xc1, 0xea, 0x5e, 0x9f, 0x2b, 0xc9, 0x7d, 0xbc, 0x08, 0x23, 0x97, 0x56, 0xe2}, [16]byte{0x00, 0x8f, 0x03, 0x8c, 0x06, 0x89, 0x05, 0x8a, 0x0c, 0x83, 0x0f, 0x80, 0x0a, 0x85, 0x09, 0x86}}, + {[16]byte{0x00, 0xb5, 0x77, 0xc2, 0xee, 0x5b, 0x99, 0x2c, 0xc1, 0x74, 0xb6, 0x03, 0x2f, 0x9a, 0x58, 0xed}, [16]byte{0x00, 0x9f, 0x23, 0xbc, 0x46, 0xd9, 0x65, 0xfa, 0x8c, 0x13, 0xaf, 0x30, 0xca, 0x55, 0xe9, 0x76}}, + {[16]byte{0x00, 0xb6, 0x71, 0xc7, 0xe2, 0x54, 0x93, 0x25, 0xd9, 0x6f, 0xa8, 0x1e, 0x3b, 0x8d, 0x4a, 0xfc}, [16]byte{0x00, 0xaf, 0x43, 0xec, 0x86, 0x29, 0xc5, 0x6a, 0x11, 0xbe, 0x52, 0xfd, 0x97, 0x38, 0xd4, 0x7b}}, + {[16]byte{0x00, 0xb7, 0x73, 0xc4, 0xe6, 0x51, 0x95, 
0x22, 0xd1, 0x66, 0xa2, 0x15, 0x37, 0x80, 0x44, 0xf3}, [16]byte{0x00, 0xbf, 0x63, 0xdc, 0xc6, 0x79, 0xa5, 0x1a, 0x91, 0x2e, 0xf2, 0x4d, 0x57, 0xe8, 0x34, 0x8b}}, + {[16]byte{0x00, 0xb8, 0x6d, 0xd5, 0xda, 0x62, 0xb7, 0x0f, 0xa9, 0x11, 0xc4, 0x7c, 0x73, 0xcb, 0x1e, 0xa6}, [16]byte{0x00, 0x4f, 0x9e, 0xd1, 0x21, 0x6e, 0xbf, 0xf0, 0x42, 0x0d, 0xdc, 0x93, 0x63, 0x2c, 0xfd, 0xb2}}, + {[16]byte{0x00, 0xb9, 0x6f, 0xd6, 0xde, 0x67, 0xb1, 0x08, 0xa1, 0x18, 0xce, 0x77, 0x7f, 0xc6, 0x10, 0xa9}, [16]byte{0x00, 0x5f, 0xbe, 0xe1, 0x61, 0x3e, 0xdf, 0x80, 0xc2, 0x9d, 0x7c, 0x23, 0xa3, 0xfc, 0x1d, 0x42}}, + {[16]byte{0x00, 0xba, 0x69, 0xd3, 0xd2, 0x68, 0xbb, 0x01, 0xb9, 0x03, 0xd0, 0x6a, 0x6b, 0xd1, 0x02, 0xb8}, [16]byte{0x00, 0x6f, 0xde, 0xb1, 0xa1, 0xce, 0x7f, 0x10, 0x5f, 0x30, 0x81, 0xee, 0xfe, 0x91, 0x20, 0x4f}}, + {[16]byte{0x00, 0xbb, 0x6b, 0xd0, 0xd6, 0x6d, 0xbd, 0x06, 0xb1, 0x0a, 0xda, 0x61, 0x67, 0xdc, 0x0c, 0xb7}, [16]byte{0x00, 0x7f, 0xfe, 0x81, 0xe1, 0x9e, 0x1f, 0x60, 0xdf, 0xa0, 0x21, 0x5e, 0x3e, 0x41, 0xc0, 0xbf}}, + {[16]byte{0x00, 0xbc, 0x65, 0xd9, 0xca, 0x76, 0xaf, 0x13, 0x89, 0x35, 0xec, 0x50, 0x43, 0xff, 0x26, 0x9a}, [16]byte{0x00, 0x0f, 0x1e, 0x11, 0x3c, 0x33, 0x22, 0x2d, 0x78, 0x77, 0x66, 0x69, 0x44, 0x4b, 0x5a, 0x55}}, + {[16]byte{0x00, 0xbd, 0x67, 0xda, 0xce, 0x73, 0xa9, 0x14, 0x81, 0x3c, 0xe6, 0x5b, 0x4f, 0xf2, 0x28, 0x95}, [16]byte{0x00, 0x1f, 0x3e, 0x21, 0x7c, 0x63, 0x42, 0x5d, 0xf8, 0xe7, 0xc6, 0xd9, 0x84, 0x9b, 0xba, 0xa5}}, + {[16]byte{0x00, 0xbe, 0x61, 0xdf, 0xc2, 0x7c, 0xa3, 0x1d, 0x99, 0x27, 0xf8, 0x46, 0x5b, 0xe5, 0x3a, 0x84}, [16]byte{0x00, 0x2f, 0x5e, 0x71, 0xbc, 0x93, 0xe2, 0xcd, 0x65, 0x4a, 0x3b, 0x14, 0xd9, 0xf6, 0x87, 0xa8}}, + {[16]byte{0x00, 0xbf, 0x63, 0xdc, 0xc6, 0x79, 0xa5, 0x1a, 0x91, 0x2e, 0xf2, 0x4d, 0x57, 0xe8, 0x34, 0x8b}, [16]byte{0x00, 0x3f, 0x7e, 0x41, 0xfc, 0xc3, 0x82, 0xbd, 0xe5, 0xda, 0x9b, 0xa4, 0x19, 0x26, 0x67, 0x58}}, + {[16]byte{0x00, 0xc0, 0x9d, 0x5d, 0x27, 0xe7, 0xba, 0x7a, 0x4e, 0x8e, 0xd3, 0x13, 0x69, 0xa9, 0xf4, 0x34}, 
[16]byte{0x00, 0x9c, 0x25, 0xb9, 0x4a, 0xd6, 0x6f, 0xf3, 0x94, 0x08, 0xb1, 0x2d, 0xde, 0x42, 0xfb, 0x67}}, + {[16]byte{0x00, 0xc1, 0x9f, 0x5e, 0x23, 0xe2, 0xbc, 0x7d, 0x46, 0x87, 0xd9, 0x18, 0x65, 0xa4, 0xfa, 0x3b}, [16]byte{0x00, 0x8c, 0x05, 0x89, 0x0a, 0x86, 0x0f, 0x83, 0x14, 0x98, 0x11, 0x9d, 0x1e, 0x92, 0x1b, 0x97}}, + {[16]byte{0x00, 0xc2, 0x99, 0x5b, 0x2f, 0xed, 0xb6, 0x74, 0x5e, 0x9c, 0xc7, 0x05, 0x71, 0xb3, 0xe8, 0x2a}, [16]byte{0x00, 0xbc, 0x65, 0xd9, 0xca, 0x76, 0xaf, 0x13, 0x89, 0x35, 0xec, 0x50, 0x43, 0xff, 0x26, 0x9a}}, + {[16]byte{0x00, 0xc3, 0x9b, 0x58, 0x2b, 0xe8, 0xb0, 0x73, 0x56, 0x95, 0xcd, 0x0e, 0x7d, 0xbe, 0xe6, 0x25}, [16]byte{0x00, 0xac, 0x45, 0xe9, 0x8a, 0x26, 0xcf, 0x63, 0x09, 0xa5, 0x4c, 0xe0, 0x83, 0x2f, 0xc6, 0x6a}}, + {[16]byte{0x00, 0xc4, 0x95, 0x51, 0x37, 0xf3, 0xa2, 0x66, 0x6e, 0xaa, 0xfb, 0x3f, 0x59, 0x9d, 0xcc, 0x08}, [16]byte{0x00, 0xdc, 0xa5, 0x79, 0x57, 0x8b, 0xf2, 0x2e, 0xae, 0x72, 0x0b, 0xd7, 0xf9, 0x25, 0x5c, 0x80}}, + {[16]byte{0x00, 0xc5, 0x97, 0x52, 0x33, 0xf6, 0xa4, 0x61, 0x66, 0xa3, 0xf1, 0x34, 0x55, 0x90, 0xc2, 0x07}, [16]byte{0x00, 0xcc, 0x85, 0x49, 0x17, 0xdb, 0x92, 0x5e, 0x2e, 0xe2, 0xab, 0x67, 0x39, 0xf5, 0xbc, 0x70}}, + {[16]byte{0x00, 0xc6, 0x91, 0x57, 0x3f, 0xf9, 0xae, 0x68, 0x7e, 0xb8, 0xef, 0x29, 0x41, 0x87, 0xd0, 0x16}, [16]byte{0x00, 0xfc, 0xe5, 0x19, 0xd7, 0x2b, 0x32, 0xce, 0xb3, 0x4f, 0x56, 0xaa, 0x64, 0x98, 0x81, 0x7d}}, + {[16]byte{0x00, 0xc7, 0x93, 0x54, 0x3b, 0xfc, 0xa8, 0x6f, 0x76, 0xb1, 0xe5, 0x22, 0x4d, 0x8a, 0xde, 0x19}, [16]byte{0x00, 0xec, 0xc5, 0x29, 0x97, 0x7b, 0x52, 0xbe, 0x33, 0xdf, 0xf6, 0x1a, 0xa4, 0x48, 0x61, 0x8d}}, + {[16]byte{0x00, 0xc8, 0x8d, 0x45, 0x07, 0xcf, 0x8a, 0x42, 0x0e, 0xc6, 0x83, 0x4b, 0x09, 0xc1, 0x84, 0x4c}, [16]byte{0x00, 0x1c, 0x38, 0x24, 0x70, 0x6c, 0x48, 0x54, 0xe0, 0xfc, 0xd8, 0xc4, 0x90, 0x8c, 0xa8, 0xb4}}, + {[16]byte{0x00, 0xc9, 0x8f, 0x46, 0x03, 0xca, 0x8c, 0x45, 0x06, 0xcf, 0x89, 0x40, 0x05, 0xcc, 0x8a, 0x43}, [16]byte{0x00, 0x0c, 0x18, 0x14, 0x30, 0x3c, 0x28, 
0x24, 0x60, 0x6c, 0x78, 0x74, 0x50, 0x5c, 0x48, 0x44}}, + {[16]byte{0x00, 0xca, 0x89, 0x43, 0x0f, 0xc5, 0x86, 0x4c, 0x1e, 0xd4, 0x97, 0x5d, 0x11, 0xdb, 0x98, 0x52}, [16]byte{0x00, 0x3c, 0x78, 0x44, 0xf0, 0xcc, 0x88, 0xb4, 0xfd, 0xc1, 0x85, 0xb9, 0x0d, 0x31, 0x75, 0x49}}, + {[16]byte{0x00, 0xcb, 0x8b, 0x40, 0x0b, 0xc0, 0x80, 0x4b, 0x16, 0xdd, 0x9d, 0x56, 0x1d, 0xd6, 0x96, 0x5d}, [16]byte{0x00, 0x2c, 0x58, 0x74, 0xb0, 0x9c, 0xe8, 0xc4, 0x7d, 0x51, 0x25, 0x09, 0xcd, 0xe1, 0x95, 0xb9}}, + {[16]byte{0x00, 0xcc, 0x85, 0x49, 0x17, 0xdb, 0x92, 0x5e, 0x2e, 0xe2, 0xab, 0x67, 0x39, 0xf5, 0xbc, 0x70}, [16]byte{0x00, 0x5c, 0xb8, 0xe4, 0x6d, 0x31, 0xd5, 0x89, 0xda, 0x86, 0x62, 0x3e, 0xb7, 0xeb, 0x0f, 0x53}}, + {[16]byte{0x00, 0xcd, 0x87, 0x4a, 0x13, 0xde, 0x94, 0x59, 0x26, 0xeb, 0xa1, 0x6c, 0x35, 0xf8, 0xb2, 0x7f}, [16]byte{0x00, 0x4c, 0x98, 0xd4, 0x2d, 0x61, 0xb5, 0xf9, 0x5a, 0x16, 0xc2, 0x8e, 0x77, 0x3b, 0xef, 0xa3}}, + {[16]byte{0x00, 0xce, 0x81, 0x4f, 0x1f, 0xd1, 0x9e, 0x50, 0x3e, 0xf0, 0xbf, 0x71, 0x21, 0xef, 0xa0, 0x6e}, [16]byte{0x00, 0x7c, 0xf8, 0x84, 0xed, 0x91, 0x15, 0x69, 0xc7, 0xbb, 0x3f, 0x43, 0x2a, 0x56, 0xd2, 0xae}}, + {[16]byte{0x00, 0xcf, 0x83, 0x4c, 0x1b, 0xd4, 0x98, 0x57, 0x36, 0xf9, 0xb5, 0x7a, 0x2d, 0xe2, 0xae, 0x61}, [16]byte{0x00, 0x6c, 0xd8, 0xb4, 0xad, 0xc1, 0x75, 0x19, 0x47, 0x2b, 0x9f, 0xf3, 0xea, 0x86, 0x32, 0x5e}}, + {[16]byte{0x00, 0xd0, 0xbd, 0x6d, 0x67, 0xb7, 0xda, 0x0a, 0xce, 0x1e, 0x73, 0xa3, 0xa9, 0x79, 0x14, 0xc4}, [16]byte{0x00, 0x81, 0x1f, 0x9e, 0x3e, 0xbf, 0x21, 0xa0, 0x7c, 0xfd, 0x63, 0xe2, 0x42, 0xc3, 0x5d, 0xdc}}, + {[16]byte{0x00, 0xd1, 0xbf, 0x6e, 0x63, 0xb2, 0xdc, 0x0d, 0xc6, 0x17, 0x79, 0xa8, 0xa5, 0x74, 0x1a, 0xcb}, [16]byte{0x00, 0x91, 0x3f, 0xae, 0x7e, 0xef, 0x41, 0xd0, 0xfc, 0x6d, 0xc3, 0x52, 0x82, 0x13, 0xbd, 0x2c}}, + {[16]byte{0x00, 0xd2, 0xb9, 0x6b, 0x6f, 0xbd, 0xd6, 0x04, 0xde, 0x0c, 0x67, 0xb5, 0xb1, 0x63, 0x08, 0xda}, [16]byte{0x00, 0xa1, 0x5f, 0xfe, 0xbe, 0x1f, 0xe1, 0x40, 0x61, 0xc0, 0x3e, 0x9f, 0xdf, 0x7e, 0x80, 0x21}}, 
+ {[16]byte{0x00, 0xd3, 0xbb, 0x68, 0x6b, 0xb8, 0xd0, 0x03, 0xd6, 0x05, 0x6d, 0xbe, 0xbd, 0x6e, 0x06, 0xd5}, [16]byte{0x00, 0xb1, 0x7f, 0xce, 0xfe, 0x4f, 0x81, 0x30, 0xe1, 0x50, 0x9e, 0x2f, 0x1f, 0xae, 0x60, 0xd1}}, + {[16]byte{0x00, 0xd4, 0xb5, 0x61, 0x77, 0xa3, 0xc2, 0x16, 0xee, 0x3a, 0x5b, 0x8f, 0x99, 0x4d, 0x2c, 0xf8}, [16]byte{0x00, 0xc1, 0x9f, 0x5e, 0x23, 0xe2, 0xbc, 0x7d, 0x46, 0x87, 0xd9, 0x18, 0x65, 0xa4, 0xfa, 0x3b}}, + {[16]byte{0x00, 0xd5, 0xb7, 0x62, 0x73, 0xa6, 0xc4, 0x11, 0xe6, 0x33, 0x51, 0x84, 0x95, 0x40, 0x22, 0xf7}, [16]byte{0x00, 0xd1, 0xbf, 0x6e, 0x63, 0xb2, 0xdc, 0x0d, 0xc6, 0x17, 0x79, 0xa8, 0xa5, 0x74, 0x1a, 0xcb}}, + {[16]byte{0x00, 0xd6, 0xb1, 0x67, 0x7f, 0xa9, 0xce, 0x18, 0xfe, 0x28, 0x4f, 0x99, 0x81, 0x57, 0x30, 0xe6}, [16]byte{0x00, 0xe1, 0xdf, 0x3e, 0xa3, 0x42, 0x7c, 0x9d, 0x5b, 0xba, 0x84, 0x65, 0xf8, 0x19, 0x27, 0xc6}}, + {[16]byte{0x00, 0xd7, 0xb3, 0x64, 0x7b, 0xac, 0xc8, 0x1f, 0xf6, 0x21, 0x45, 0x92, 0x8d, 0x5a, 0x3e, 0xe9}, [16]byte{0x00, 0xf1, 0xff, 0x0e, 0xe3, 0x12, 0x1c, 0xed, 0xdb, 0x2a, 0x24, 0xd5, 0x38, 0xc9, 0xc7, 0x36}}, + {[16]byte{0x00, 0xd8, 0xad, 0x75, 0x47, 0x9f, 0xea, 0x32, 0x8e, 0x56, 0x23, 0xfb, 0xc9, 0x11, 0x64, 0xbc}, [16]byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}}, + {[16]byte{0x00, 0xd9, 0xaf, 0x76, 0x43, 0x9a, 0xec, 0x35, 0x86, 0x5f, 0x29, 0xf0, 0xc5, 0x1c, 0x6a, 0xb3}, [16]byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}}, + {[16]byte{0x00, 0xda, 0xa9, 0x73, 0x4f, 0x95, 0xe6, 0x3c, 0x9e, 0x44, 0x37, 0xed, 0xd1, 0x0b, 0x78, 0xa2}, [16]byte{0x00, 0x21, 0x42, 0x63, 0x84, 0xa5, 0xc6, 0xe7, 0x15, 0x34, 0x57, 0x76, 0x91, 0xb0, 0xd3, 0xf2}}, + {[16]byte{0x00, 0xdb, 0xab, 0x70, 0x4b, 0x90, 0xe0, 0x3b, 0x96, 0x4d, 0x3d, 0xe6, 0xdd, 0x06, 0x76, 0xad}, [16]byte{0x00, 0x31, 0x62, 0x53, 0xc4, 0xf5, 0xa6, 0x97, 0x95, 0xa4, 0xf7, 0xc6, 0x51, 0x60, 0x33, 0x02}}, + {[16]byte{0x00, 0xdc, 0xa5, 0x79, 0x57, 0x8b, 0xf2, 
0x2e, 0xae, 0x72, 0x0b, 0xd7, 0xf9, 0x25, 0x5c, 0x80}, [16]byte{0x00, 0x41, 0x82, 0xc3, 0x19, 0x58, 0x9b, 0xda, 0x32, 0x73, 0xb0, 0xf1, 0x2b, 0x6a, 0xa9, 0xe8}}, + {[16]byte{0x00, 0xdd, 0xa7, 0x7a, 0x53, 0x8e, 0xf4, 0x29, 0xa6, 0x7b, 0x01, 0xdc, 0xf5, 0x28, 0x52, 0x8f}, [16]byte{0x00, 0x51, 0xa2, 0xf3, 0x59, 0x08, 0xfb, 0xaa, 0xb2, 0xe3, 0x10, 0x41, 0xeb, 0xba, 0x49, 0x18}}, + {[16]byte{0x00, 0xde, 0xa1, 0x7f, 0x5f, 0x81, 0xfe, 0x20, 0xbe, 0x60, 0x1f, 0xc1, 0xe1, 0x3f, 0x40, 0x9e}, [16]byte{0x00, 0x61, 0xc2, 0xa3, 0x99, 0xf8, 0x5b, 0x3a, 0x2f, 0x4e, 0xed, 0x8c, 0xb6, 0xd7, 0x74, 0x15}}, + {[16]byte{0x00, 0xdf, 0xa3, 0x7c, 0x5b, 0x84, 0xf8, 0x27, 0xb6, 0x69, 0x15, 0xca, 0xed, 0x32, 0x4e, 0x91}, [16]byte{0x00, 0x71, 0xe2, 0x93, 0xd9, 0xa8, 0x3b, 0x4a, 0xaf, 0xde, 0x4d, 0x3c, 0x76, 0x07, 0x94, 0xe5}}, + {[16]byte{0x00, 0xe0, 0xdd, 0x3d, 0xa7, 0x47, 0x7a, 0x9a, 0x53, 0xb3, 0x8e, 0x6e, 0xf4, 0x14, 0x29, 0xc9}, [16]byte{0x00, 0xa6, 0x51, 0xf7, 0xa2, 0x04, 0xf3, 0x55, 0x59, 0xff, 0x08, 0xae, 0xfb, 0x5d, 0xaa, 0x0c}}, + {[16]byte{0x00, 0xe1, 0xdf, 0x3e, 0xa3, 0x42, 0x7c, 0x9d, 0x5b, 0xba, 0x84, 0x65, 0xf8, 0x19, 0x27, 0xc6}, [16]byte{0x00, 0xb6, 0x71, 0xc7, 0xe2, 0x54, 0x93, 0x25, 0xd9, 0x6f, 0xa8, 0x1e, 0x3b, 0x8d, 0x4a, 0xfc}}, + {[16]byte{0x00, 0xe2, 0xd9, 0x3b, 0xaf, 0x4d, 0x76, 0x94, 0x43, 0xa1, 0x9a, 0x78, 0xec, 0x0e, 0x35, 0xd7}, [16]byte{0x00, 0x86, 0x11, 0x97, 0x22, 0xa4, 0x33, 0xb5, 0x44, 0xc2, 0x55, 0xd3, 0x66, 0xe0, 0x77, 0xf1}}, + {[16]byte{0x00, 0xe3, 0xdb, 0x38, 0xab, 0x48, 0x70, 0x93, 0x4b, 0xa8, 0x90, 0x73, 0xe0, 0x03, 0x3b, 0xd8}, [16]byte{0x00, 0x96, 0x31, 0xa7, 0x62, 0xf4, 0x53, 0xc5, 0xc4, 0x52, 0xf5, 0x63, 0xa6, 0x30, 0x97, 0x01}}, + {[16]byte{0x00, 0xe4, 0xd5, 0x31, 0xb7, 0x53, 0x62, 0x86, 0x73, 0x97, 0xa6, 0x42, 0xc4, 0x20, 0x11, 0xf5}, [16]byte{0x00, 0xe6, 0xd1, 0x37, 0xbf, 0x59, 0x6e, 0x88, 0x63, 0x85, 0xb2, 0x54, 0xdc, 0x3a, 0x0d, 0xeb}}, + {[16]byte{0x00, 0xe5, 0xd7, 0x32, 0xb3, 0x56, 0x64, 0x81, 0x7b, 0x9e, 0xac, 0x49, 0xc8, 0x2d, 0x1f, 0xfa}, 
[16]byte{0x00, 0xf6, 0xf1, 0x07, 0xff, 0x09, 0x0e, 0xf8, 0xe3, 0x15, 0x12, 0xe4, 0x1c, 0xea, 0xed, 0x1b}}, + {[16]byte{0x00, 0xe6, 0xd1, 0x37, 0xbf, 0x59, 0x6e, 0x88, 0x63, 0x85, 0xb2, 0x54, 0xdc, 0x3a, 0x0d, 0xeb}, [16]byte{0x00, 0xc6, 0x91, 0x57, 0x3f, 0xf9, 0xae, 0x68, 0x7e, 0xb8, 0xef, 0x29, 0x41, 0x87, 0xd0, 0x16}}, + {[16]byte{0x00, 0xe7, 0xd3, 0x34, 0xbb, 0x5c, 0x68, 0x8f, 0x6b, 0x8c, 0xb8, 0x5f, 0xd0, 0x37, 0x03, 0xe4}, [16]byte{0x00, 0xd6, 0xb1, 0x67, 0x7f, 0xa9, 0xce, 0x18, 0xfe, 0x28, 0x4f, 0x99, 0x81, 0x57, 0x30, 0xe6}}, + {[16]byte{0x00, 0xe8, 0xcd, 0x25, 0x87, 0x6f, 0x4a, 0xa2, 0x13, 0xfb, 0xde, 0x36, 0x94, 0x7c, 0x59, 0xb1}, [16]byte{0x00, 0x26, 0x4c, 0x6a, 0x98, 0xbe, 0xd4, 0xf2, 0x2d, 0x0b, 0x61, 0x47, 0xb5, 0x93, 0xf9, 0xdf}}, + {[16]byte{0x00, 0xe9, 0xcf, 0x26, 0x83, 0x6a, 0x4c, 0xa5, 0x1b, 0xf2, 0xd4, 0x3d, 0x98, 0x71, 0x57, 0xbe}, [16]byte{0x00, 0x36, 0x6c, 0x5a, 0xd8, 0xee, 0xb4, 0x82, 0xad, 0x9b, 0xc1, 0xf7, 0x75, 0x43, 0x19, 0x2f}}, + {[16]byte{0x00, 0xea, 0xc9, 0x23, 0x8f, 0x65, 0x46, 0xac, 0x03, 0xe9, 0xca, 0x20, 0x8c, 0x66, 0x45, 0xaf}, [16]byte{0x00, 0x06, 0x0c, 0x0a, 0x18, 0x1e, 0x14, 0x12, 0x30, 0x36, 0x3c, 0x3a, 0x28, 0x2e, 0x24, 0x22}}, + {[16]byte{0x00, 0xeb, 0xcb, 0x20, 0x8b, 0x60, 0x40, 0xab, 0x0b, 0xe0, 0xc0, 0x2b, 0x80, 0x6b, 0x4b, 0xa0}, [16]byte{0x00, 0x16, 0x2c, 0x3a, 0x58, 0x4e, 0x74, 0x62, 0xb0, 0xa6, 0x9c, 0x8a, 0xe8, 0xfe, 0xc4, 0xd2}}, + {[16]byte{0x00, 0xec, 0xc5, 0x29, 0x97, 0x7b, 0x52, 0xbe, 0x33, 0xdf, 0xf6, 0x1a, 0xa4, 0x48, 0x61, 0x8d}, [16]byte{0x00, 0x66, 0xcc, 0xaa, 0x85, 0xe3, 0x49, 0x2f, 0x17, 0x71, 0xdb, 0xbd, 0x92, 0xf4, 0x5e, 0x38}}, + {[16]byte{0x00, 0xed, 0xc7, 0x2a, 0x93, 0x7e, 0x54, 0xb9, 0x3b, 0xd6, 0xfc, 0x11, 0xa8, 0x45, 0x6f, 0x82}, [16]byte{0x00, 0x76, 0xec, 0x9a, 0xc5, 0xb3, 0x29, 0x5f, 0x97, 0xe1, 0x7b, 0x0d, 0x52, 0x24, 0xbe, 0xc8}}, + {[16]byte{0x00, 0xee, 0xc1, 0x2f, 0x9f, 0x71, 0x5e, 0xb0, 0x23, 0xcd, 0xe2, 0x0c, 0xbc, 0x52, 0x7d, 0x93}, [16]byte{0x00, 0x46, 0x8c, 0xca, 0x05, 0x43, 0x89, 
0xcf, 0x0a, 0x4c, 0x86, 0xc0, 0x0f, 0x49, 0x83, 0xc5}}, + {[16]byte{0x00, 0xef, 0xc3, 0x2c, 0x9b, 0x74, 0x58, 0xb7, 0x2b, 0xc4, 0xe8, 0x07, 0xb0, 0x5f, 0x73, 0x9c}, [16]byte{0x00, 0x56, 0xac, 0xfa, 0x45, 0x13, 0xe9, 0xbf, 0x8a, 0xdc, 0x26, 0x70, 0xcf, 0x99, 0x63, 0x35}}, + {[16]byte{0x00, 0xf0, 0xfd, 0x0d, 0xe7, 0x17, 0x1a, 0xea, 0xd3, 0x23, 0x2e, 0xde, 0x34, 0xc4, 0xc9, 0x39}, [16]byte{0x00, 0xbb, 0x6b, 0xd0, 0xd6, 0x6d, 0xbd, 0x06, 0xb1, 0x0a, 0xda, 0x61, 0x67, 0xdc, 0x0c, 0xb7}}, + {[16]byte{0x00, 0xf1, 0xff, 0x0e, 0xe3, 0x12, 0x1c, 0xed, 0xdb, 0x2a, 0x24, 0xd5, 0x38, 0xc9, 0xc7, 0x36}, [16]byte{0x00, 0xab, 0x4b, 0xe0, 0x96, 0x3d, 0xdd, 0x76, 0x31, 0x9a, 0x7a, 0xd1, 0xa7, 0x0c, 0xec, 0x47}}, + {[16]byte{0x00, 0xf2, 0xf9, 0x0b, 0xef, 0x1d, 0x16, 0xe4, 0xc3, 0x31, 0x3a, 0xc8, 0x2c, 0xde, 0xd5, 0x27}, [16]byte{0x00, 0x9b, 0x2b, 0xb0, 0x56, 0xcd, 0x7d, 0xe6, 0xac, 0x37, 0x87, 0x1c, 0xfa, 0x61, 0xd1, 0x4a}}, + {[16]byte{0x00, 0xf3, 0xfb, 0x08, 0xeb, 0x18, 0x10, 0xe3, 0xcb, 0x38, 0x30, 0xc3, 0x20, 0xd3, 0xdb, 0x28}, [16]byte{0x00, 0x8b, 0x0b, 0x80, 0x16, 0x9d, 0x1d, 0x96, 0x2c, 0xa7, 0x27, 0xac, 0x3a, 0xb1, 0x31, 0xba}}, + {[16]byte{0x00, 0xf4, 0xf5, 0x01, 0xf7, 0x03, 0x02, 0xf6, 0xf3, 0x07, 0x06, 0xf2, 0x04, 0xf0, 0xf1, 0x05}, [16]byte{0x00, 0xfb, 0xeb, 0x10, 0xcb, 0x30, 0x20, 0xdb, 0x8b, 0x70, 0x60, 0x9b, 0x40, 0xbb, 0xab, 0x50}}, + {[16]byte{0x00, 0xf5, 0xf7, 0x02, 0xf3, 0x06, 0x04, 0xf1, 0xfb, 0x0e, 0x0c, 0xf9, 0x08, 0xfd, 0xff, 0x0a}, [16]byte{0x00, 0xeb, 0xcb, 0x20, 0x8b, 0x60, 0x40, 0xab, 0x0b, 0xe0, 0xc0, 0x2b, 0x80, 0x6b, 0x4b, 0xa0}}, + {[16]byte{0x00, 0xf6, 0xf1, 0x07, 0xff, 0x09, 0x0e, 0xf8, 0xe3, 0x15, 0x12, 0xe4, 0x1c, 0xea, 0xed, 0x1b}, [16]byte{0x00, 0xdb, 0xab, 0x70, 0x4b, 0x90, 0xe0, 0x3b, 0x96, 0x4d, 0x3d, 0xe6, 0xdd, 0x06, 0x76, 0xad}}, + {[16]byte{0x00, 0xf7, 0xf3, 0x04, 0xfb, 0x0c, 0x08, 0xff, 0xeb, 0x1c, 0x18, 0xef, 0x10, 0xe7, 0xe3, 0x14}, [16]byte{0x00, 0xcb, 0x8b, 0x40, 0x0b, 0xc0, 0x80, 0x4b, 0x16, 0xdd, 0x9d, 0x56, 0x1d, 0xd6, 0x96, 0x5d}}, 
+ {[16]byte{0x00, 0xf8, 0xed, 0x15, 0xc7, 0x3f, 0x2a, 0xd2, 0x93, 0x6b, 0x7e, 0x86, 0x54, 0xac, 0xb9, 0x41}, [16]byte{0x00, 0x3b, 0x76, 0x4d, 0xec, 0xd7, 0x9a, 0xa1, 0xc5, 0xfe, 0xb3, 0x88, 0x29, 0x12, 0x5f, 0x64}}, + {[16]byte{0x00, 0xf9, 0xef, 0x16, 0xc3, 0x3a, 0x2c, 0xd5, 0x9b, 0x62, 0x74, 0x8d, 0x58, 0xa1, 0xb7, 0x4e}, [16]byte{0x00, 0x2b, 0x56, 0x7d, 0xac, 0x87, 0xfa, 0xd1, 0x45, 0x6e, 0x13, 0x38, 0xe9, 0xc2, 0xbf, 0x94}}, + {[16]byte{0x00, 0xfa, 0xe9, 0x13, 0xcf, 0x35, 0x26, 0xdc, 0x83, 0x79, 0x6a, 0x90, 0x4c, 0xb6, 0xa5, 0x5f}, [16]byte{0x00, 0x1b, 0x36, 0x2d, 0x6c, 0x77, 0x5a, 0x41, 0xd8, 0xc3, 0xee, 0xf5, 0xb4, 0xaf, 0x82, 0x99}}, + {[16]byte{0x00, 0xfb, 0xeb, 0x10, 0xcb, 0x30, 0x20, 0xdb, 0x8b, 0x70, 0x60, 0x9b, 0x40, 0xbb, 0xab, 0x50}, [16]byte{0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69}}, + {[16]byte{0x00, 0xfc, 0xe5, 0x19, 0xd7, 0x2b, 0x32, 0xce, 0xb3, 0x4f, 0x56, 0xaa, 0x64, 0x98, 0x81, 0x7d}, [16]byte{0x00, 0x7b, 0xf6, 0x8d, 0xf1, 0x8a, 0x07, 0x7c, 0xff, 0x84, 0x09, 0x72, 0x0e, 0x75, 0xf8, 0x83}}, + {[16]byte{0x00, 0xfd, 0xe7, 0x1a, 0xd3, 0x2e, 0x34, 0xc9, 0xbb, 0x46, 0x5c, 0xa1, 0x68, 0x95, 0x8f, 0x72}, [16]byte{0x00, 0x6b, 0xd6, 0xbd, 0xb1, 0xda, 0x67, 0x0c, 0x7f, 0x14, 0xa9, 0xc2, 0xce, 0xa5, 0x18, 0x73}}, + {[16]byte{0x00, 0xfe, 0xe1, 0x1f, 0xdf, 0x21, 0x3e, 0xc0, 0xa3, 0x5d, 0x42, 0xbc, 0x7c, 0x82, 0x9d, 0x63}, [16]byte{0x00, 0x5b, 0xb6, 0xed, 0x71, 0x2a, 0xc7, 0x9c, 0xe2, 0xb9, 0x54, 0x0f, 0x93, 0xc8, 0x25, 0x7e}}, + {[16]byte{0x00, 0xff, 0xe3, 0x1c, 0xdb, 0x24, 0x38, 0xc7, 0xab, 0x54, 0x48, 0xb7, 0x70, 0x8f, 0x93, 0x6c}, [16]byte{0x00, 0x4b, 0x96, 0xdd, 0x31, 0x7a, 0xa7, 0xec, 0x62, 0x29, 0xf4, 0xbf, 0x53, 0x18, 0xc5, 0x8e}}, +} diff --git a/vendor/github.com/vivint/infectious/berlekamp_welch.go b/vendor/github.com/vivint/infectious/berlekamp_welch.go new file mode 100644 index 000000000..8fa1ad7e8 --- /dev/null +++ b/vendor/github.com/vivint/infectious/berlekamp_welch.go @@ -0,0 +1,245 @@ 
+// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package infectious + +import ( + "sort" +) + +// Decode will take a destination buffer (can be nil) and a list of shares +// (pieces). It will return the data passed in to the corresponding Encode +// call or return an error. +// +// It will first correct the shares using Correct, mutating and reordering the +// passed-in shares arguments. Then it will rebuild the data using Rebuild. +// Finally it will concatenate the data into the given output buffer dst if it +// has capacity, growing it otherwise. +// +// If you already know your data does not contain errors, Rebuild will be +// faster. +// +// If you only want to identify which pieces are bad, you may be interested in +// Correct. +// +// If you don't want the data concatenated for you, you can use Correct and +// then Rebuild individually. 
+func (f *FEC) Decode(dst []byte, shares []Share) ([]byte, error) { + err := f.Correct(shares) + if err != nil { + return nil, err + } + + if len(shares) == 0 { + return nil, Error.New("must specify at least one share") + } + piece_len := len(shares[0].Data) + result_len := piece_len * f.k + if cap(dst) < result_len { + dst = make([]byte, result_len) + } else { + dst = dst[:result_len] + } + + return dst, f.Rebuild(shares, func(s Share) { + copy(dst[s.Number*piece_len:], s.Data) + }) +} + +func (f *FEC) decode(shares []Share, output func(Share)) error { + err := f.Correct(shares) + if err != nil { + return err + } + return f.Rebuild(shares, output) +} + +// Correct implements the Berlekamp-Welch algorithm for correcting +// errors in given FEC encoded data. It will correct the supplied shares, +// mutating the underlying byte slices and reordering the shares +func (fc *FEC) Correct(shares []Share) error { + if len(shares) < fc.k { + return Error.New("must specify at least the number of required shares") + } + + sort.Sort(byNumber(shares)) + + // fast path: check to see if there are no errors by evaluating it with + // the syndrome matrix. 
+ synd, err := fc.syndromeMatrix(shares) + if err != nil { + return err + } + buf := make([]byte, len(shares[0].Data)) + + for i := 0; i < synd.r; i++ { + for j := range buf { + buf[j] = 0 + } + + for j := 0; j < synd.c; j++ { + addmul(buf, shares[j].Data, byte(synd.get(i, j))) + } + + for j := range buf { + if buf[j] == 0 { + continue + } + data, err := fc.berlekampWelch(shares, j) + if err != nil { + return err + } + for _, share := range shares { + share.Data[j] = data[share.Number] + } + } + } + + return nil +} + +func (fc *FEC) berlekampWelch(shares []Share, index int) ([]byte, error) { + k := fc.k // required size + r := len(shares) // required + redundancy size + e := (r - k) / 2 // deg of E polynomial + q := e + k // def of Q polynomial + + if e <= 0 { + return nil, NotEnoughShares.New("") + } + + const interp_base = gfVal(2) + + eval_point := func(num int) gfVal { + if num == 0 { + return 0 + } + return interp_base.pow(num - 1) + } + + dim := q + e + + // build the system of equations s * u = f + s := matrixNew(dim, dim) // constraint matrix + a := matrixNew(dim, dim) // augmented matrix + f := make(gfVals, dim) // constant column vector + u := make(gfVals, dim) // solution vector + + for i := 0; i < dim; i++ { + x_i := eval_point(shares[i].Number) + r_i := gfConst(shares[i].Data[index]) + + f[i] = x_i.pow(e).mul(r_i) + + for j := 0; j < q; j++ { + s.set(i, j, x_i.pow(j)) + if i == j { + a.set(i, j, gfConst(1)) + } + } + + for k := 0; k < e; k++ { + j := k + q + + s.set(i, j, x_i.pow(k).mul(r_i)) + if i == j { + a.set(i, j, gfConst(1)) + } + } + } + + // invert and put the result in a + err := s.invertWith(a) + if err != nil { + return nil, err + } + + // multiply the inverted matrix by the column vector + for i := 0; i < dim; i++ { + ri := a.indexRow(i) + u[i] = ri.dot(f) + } + + // reverse u for easier construction of the polynomials + for i := 0; i < len(u)/2; i++ { + o := len(u) - i - 1 + u[i], u[o] = u[o], u[i] + } + + q_poly := gfPoly(u[e:]) + e_poly 
:= append(gfPoly{gfConst(1)}, u[:e]...) + + p_poly, rem, err := q_poly.div(e_poly) + if err != nil { + return nil, err + } + + if !rem.isZero() { + return nil, TooManyErrors.New("") + } + + out := make([]byte, fc.n) + for i := range out { + pt := gfConst(0) + if i != 0 { + pt = interp_base.pow(i - 1) + } + out[i] = byte(p_poly.eval(pt)) + } + + return out, nil +} + +func (fc *FEC) syndromeMatrix(shares []Share) (gfMat, error) { + // get a list of keepers + keepers := make([]bool, fc.n) + shareCount := 0 + for _, share := range shares { + if !keepers[share.Number] { + keepers[share.Number] = true + shareCount++ + } + } + + // create a vandermonde matrix but skip columns where we're missing the + // share. + out := matrixNew(fc.k, shareCount) + for i := 0; i < fc.k; i++ { + skipped := 0 + for j := 0; j < fc.n; j++ { + if !keepers[j] { + skipped++ + continue + } + + out.set(i, j-skipped, gfConst(fc.vand_matrix[i*fc.n+j])) + } + } + + // standardize the output and convert into parity form + err := out.standardize() + if err != nil { + return gfMat{}, err + } + + return out.parity(), nil +} diff --git a/vendor/github.com/vivint/infectious/common.go b/vendor/github.com/vivint/infectious/common.go new file mode 100644 index 000000000..bb404a55c --- /dev/null +++ b/vendor/github.com/vivint/infectious/common.go @@ -0,0 +1,48 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +// Package infectious implements Reed-Solomon forward error correction [1]. It +// uses the Berlekamp-Welch [2] error correction algorithm to achieve the +// ability to actually correct errors. +// +// Caution: this package API leans toward providing the user more power and +// performance at the expense of having some really sharp edges! Read the +// documentation about memory lifecycles carefully! +// +// We wrote a blog post about how this library works! 
+// https://innovation.vivint.com/introduction-to-reed-solomon-bc264d0794f8 +// +// [1] https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction +// [2] https://en.wikipedia.org/wiki/Berlekamp%E2%80%93Welch_algorithm +package infectious + +import ( + "github.com/spacemonkeygo/errors" + "golang.org/x/sys/cpu" +) + +var Error = errors.NewClass("infectious") +var NotEnoughShares = Error.NewClass("not enough shares") +var TooManyErrors = Error.NewClass("too many errors") + +var hasAVX2 = cpu.X86.HasAVX2 +var hasSSSE3 = cpu.X86.HasSSSE3 diff --git a/vendor/github.com/vivint/infectious/fec.go b/vendor/github.com/vivint/infectious/fec.go new file mode 100644 index 000000000..06b2396dd --- /dev/null +++ b/vendor/github.com/vivint/infectious/fec.go @@ -0,0 +1,319 @@ +// (C) 1996-1998 Luigi Rizzo (luigi@iet.unipi.it) +// 2009-2010 Jack Lloyd (lloyd@randombit.net) +// 2011 Billy Brumley (billy.brumley@aalto.fi) +// 2016-2017 Vivint, Inc. (jeff.wendling@vivint.com) +// +// Portions derived from code by Phil Karn (karn@ka9q.ampr.org), +// Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari +// Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +package infectious + +import "sort" + +// FEC represents operations performed on a Reed-Solomon-based +// forward error correction code. Make sure to construct using NewFEC. +type FEC struct { + k int + n int + enc_matrix []byte + vand_matrix []byte +} + +// NewFEC creates a *FEC using k required pieces and n total pieces. +// Encoding data with this *FEC will generate n pieces, and decoding +// data requires k uncorrupted pieces. If during decode more than k pieces +// exist, corrupted data can be detected and recovered from. +func NewFEC(k, n int) (*FEC, error) { + if k <= 0 || n <= 0 || k > 256 || n > 256 || k > n { + return nil, Error.New("requires 1 <= k <= n <= 256") + } + + enc_matrix := make([]byte, n*k) + temp_matrix := make([]byte, n*k) + createInvertedVdm(temp_matrix, k) + + for i := k * k; i < len(temp_matrix); i++ { + temp_matrix[i] = gf_exp[((i/k)*(i%k))%255] + } + + for i := 0; i < k; i++ { + enc_matrix[i*(k+1)] = 1 + } + + for row := k * k; row < n*k; row += k { + for col := 0; col < k; col++ { + pa := temp_matrix[row:] + pb := temp_matrix[col:] + acc := byte(0) + for i := 0; i < k; i, pa, pb = i+1, pa[1:], pb[k:] { + acc ^= gf_mul_table[pa[0]][pb[0]] + } + enc_matrix[row+col] = acc + } + } + + // vand_matrix has more columns than rows + // k rows, n columns. 
+ vand_matrix := make([]byte, k*n) + vand_matrix[0] = 1 + g := byte(1) + for row := 0; row < k; row++ { + a := byte(1) + for col := 1; col < n; col++ { + vand_matrix[row*n+col] = a // 2.pow(i * j) FIGURE IT OUT + a = gf_mul_table[g][a] + } + g = gf_mul_table[2][g] + } + + return &FEC{ + k: k, + n: n, + enc_matrix: enc_matrix, + vand_matrix: vand_matrix, + }, nil +} + +// Required returns the number of required pieces for reconstruction. This is +// the k value passed to NewFEC. +func (f *FEC) Required() int { + return f.k +} + +// Total returns the number of total pieces that will be generated during +// encoding. This is the n value passed to NewFEC. +func (f *FEC) Total() int { + return f.n +} + +// Encode will take input data and encode to the total number of pieces n this +// *FEC is configured for. It will call the callback output n times. +// +// The input data must be a multiple of the required number of pieces k. +// Padding to this multiple is up to the caller. +// +// Note that the byte slices in Shares passed to output may be reused when +// output returns. +func (f *FEC) Encode(input []byte, output func(Share)) error { + size := len(input) + + k := f.k + n := f.n + enc_matrix := f.enc_matrix + + if size%k != 0 { + return Error.New("input length must be a multiple of %d", k) + } + + block_size := size / k + + for i := 0; i < k; i++ { + output(Share{ + Number: i, + Data: input[i*block_size : i*block_size+block_size]}) + } + + fec_buf := make([]byte, block_size) + for i := k; i < n; i++ { + for j := range fec_buf { + fec_buf[j] = 0 + } + + for j := 0; j < k; j++ { + addmul(fec_buf, input[j*block_size:j*block_size+block_size], + enc_matrix[i*k+j]) + } + + output(Share{ + Number: i, + Data: fec_buf}) + } + return nil +} + +// EncodeSingle will take input data and encode it to output only for the num +// piece. +// +// The input data must be a multiple of the required number of pieces k. +// Padding to this multiple is up to the caller. 
+// +// The output must be exactly len(input) / k bytes. +// +// The num must be 0 <= num < n. +func (f *FEC) EncodeSingle(input, output []byte, num int) error { + size := len(input) + + k := f.k + n := f.n + enc_matrix := f.enc_matrix + + if num < 0 { + return Error.New("num must be non-negative") + } + + if num >= n { + return Error.New("num must be less than %d", n) + } + + if size%k != 0 { + return Error.New("input length must be a multiple of %d", k) + } + + block_size := size / k + + if len(output) != block_size { + return Error.New("output length must be %d", block_size) + } + + if num < k { + copy(output, input[num*block_size:]) + return nil + } + + for i := range output { + output[i] = 0 + } + + for i := 0; i < k; i++ { + addmul(output, input[i*block_size:i*block_size+block_size], + enc_matrix[num*k+i]) + } + + return nil +} + +// A Share represents a piece of the FEC-encoded data. +// Both fields are required. +type Share struct { + Number int + Data []byte +} + +// DeepCopy makes getting a deep copy of a Share easier. It will return an +// identical Share that uses all new memory locations. +func (s *Share) DeepCopy() (c Share) { + c.Number = s.Number + c.Data = append([]byte(nil), s.Data...) + return c +} + +type byNumber []Share + +func (b byNumber) Len() int { return len(b) } +func (b byNumber) Less(i int, j int) bool { return b[i].Number < b[j].Number } +func (b byNumber) Swap(i int, j int) { b[i], b[j] = b[j], b[i] } + +// Rebuild will take a list of corrected shares (pieces) and a callback output. +// output will be called k times ((*FEC).Required() times) with 1/k of the +// original data each time and the index of that data piece. +// Decode is usually preferred. +// +// Note that the data is not necessarily sent to output ordered by the piece +// number. +// +// Note that the byte slices in Shares passed to output may be reused when +// output returns. +// +// Rebuild assumes that you have already called Correct or did not need to. 
+func (f *FEC) Rebuild(shares []Share, output func(Share)) error { + k := f.k + n := f.n + enc_matrix := f.enc_matrix + + if len(shares) < k { + return NotEnoughShares.New("") + } + + share_size := len(shares[0].Data) + sort.Sort(byNumber(shares)) + + m_dec := make([]byte, k*k) + indexes := make([]int, k) + sharesv := make([][]byte, k) + + shares_b_iter := 0 + shares_e_iter := len(shares) - 1 + + for i := 0; i < k; i++ { + var share_id int + var share_data []byte + + if share := shares[shares_b_iter]; share.Number == i { + share_id = share.Number + share_data = share.Data + shares_b_iter++ + } else { + share := shares[shares_e_iter] + share_id = share.Number + share_data = share.Data + shares_e_iter-- + } + + if share_id >= n { + return Error.New("invalid share id: %d", share_id) + } + + if share_id < k { + m_dec[i*(k+1)] = 1 + if output != nil { + output(Share{ + Number: share_id, + Data: share_data}) + } + } else { + copy(m_dec[i*k:i*k+k], enc_matrix[share_id*k:]) + } + + sharesv[i] = share_data + indexes[i] = share_id + } + + if err := invertMatrix(m_dec, k); err != nil { + return err + } + + buf := make([]byte, share_size) + for i := 0; i < len(indexes); i++ { + if indexes[i] >= k { + for j := range buf { + buf[j] = 0 + } + + for col := 0; col < k; col++ { + addmul(buf, sharesv[col], m_dec[i*k+col]) + } + + if output != nil { + output(Share{ + Number: i, + Data: buf}) + } + } + } + return nil +} diff --git a/vendor/github.com/vivint/infectious/gf_alg.go b/vendor/github.com/vivint/infectious/gf_alg.go new file mode 100644 index 000000000..a9f025167 --- /dev/null +++ b/vendor/github.com/vivint/infectious/gf_alg.go @@ -0,0 +1,422 @@ +// The MIT License (MIT) +// +// Copyright (C) 2016-2017 Vivint, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package infectious + +import ( + "fmt" + "strings" + "unsafe" +) + +// +// basic helpers around gf(2^8) values +// + +type gfVal byte + +func gfConst(val byte) gfVal { + return gfVal(val) +} + +func (b gfVal) pow(val int) gfVal { + out := gfVal(1) + mul_base := gf_mul_table[b][:] + for i := 0; i < val; i++ { + out = gfVal(mul_base[out]) + } + return out +} + +func (a gfVal) mul(b gfVal) gfVal { + return gfVal(gf_mul_table[a][b]) +} + +func (a gfVal) div(b gfVal) (gfVal, error) { + if b == 0 { + return 0, Error.New("divide by zero") + } + if a == 0 { + return 0, nil + } + return gfVal(gf_exp[gf_log[a]-gf_log[b]]), nil +} + +func (a gfVal) add(b gfVal) gfVal { + return gfVal(a ^ b) +} + +func (a gfVal) isZero() bool { + return a == 0 +} + +func (a gfVal) inv() (gfVal, error) { + if a == 0 { + return 0, Error.New("invert zero") + } + return gfVal(gf_exp[255-gf_log[a]]), nil +} + +// +// basic helpers about a slice of gf(2^8) values +// + +type gfVals []gfVal + +func (a gfVals) unsafeBytes() []byte { + return *(*[]byte)(unsafe.Pointer(&a)) +} + +func (a gfVals) dot(b gfVals) gfVal { + out := gfConst(0) + for i := range a { + out = out.add(a[i].mul(b[i])) + } + return out +} + +func (a gfVals) String() string { + return fmt.Sprintf("%02x", a.unsafeBytes()) +} + +// +// basic helpers for dealing with polynomials with coefficients in gf(2^8) +// + +type gfPoly []gfVal + +func polyZero(size int) gfPoly { + out := make(gfPoly, size) + for i := range out { + out[i] = gfConst(0) + } + return out +} + +func (p gfPoly) isZero() bool { + for _, coef := range p { + if !coef.isZero() { + return false + } + } + return true +} + +func (p gfPoly) deg() int { + return len(p) - 1 +} + +func (p gfPoly) index(power int) gfVal { + if power < 0 { + return gfConst(0) + } + which := p.deg() - power + if which < 0 { + return gfConst(0) + } + return p[which] +} + +func (p gfPoly) scale(factor gfVal) gfPoly { + out := make(gfPoly, len(p)) + for i, coef := range p { + out[i] = 
coef.mul(factor) + } + return out +} + +func (p *gfPoly) set(pow int, coef gfVal) { + which := p.deg() - pow + if which < 0 { + *p = append(polyZero(-which), *p...) + which = p.deg() - pow + } + (*p)[which] = coef +} + +func (p gfPoly) add(b gfPoly) gfPoly { + size := len(p) + if lb := len(b); lb > size { + size = lb + } + out := make(gfPoly, size) + for i := range out { + pi := p.index(i) + bi := b.index(i) + out.set(i, pi.add(bi)) + } + return out +} + +func (p gfPoly) div(b gfPoly) (q, r gfPoly, err error) { + // sanitize the divisor by removing leading zeros. + for len(b) > 0 && b[0].isZero() { + b = b[1:] + } + if len(b) == 0 { + return nil, nil, Error.New("divide by zero") + } + + // sanitize the base poly as well + for len(p) > 0 && p[0].isZero() { + p = p[1:] + } + if len(p) == 0 { + return polyZero(1), polyZero(1), nil + } + + const debug = false + indent := 2*len(b) + 1 + + if debug { + fmt.Printf("%02x %02x\n", b, p) + } + + for b.deg() <= p.deg() { + leading_p := p.index(p.deg()) + leading_b := b.index(b.deg()) + + if debug { + fmt.Printf("leading_p: %02x leading_b: %02x\n", + leading_p, leading_b) + } + + coef, err := leading_p.div(leading_b) + if err != nil { + return nil, nil, err + } + + if debug { + fmt.Printf("coef: %02x\n", coef) + } + + q = append(q, coef) + + scaled := b.scale(coef) + padded := append(scaled, polyZero(p.deg()-scaled.deg())...) 
+ + if debug { + fmt.Printf("%s%02x\n", strings.Repeat(" ", indent), padded) + indent += 2 + } + + p = p.add(padded) + if !p[0].isZero() { + return nil, nil, Error.New("alg error: %x", p) + } + p = p[1:] + } + + for len(p) > 1 && p[0].isZero() { + p = p[1:] + } + + return q, p, nil +} + +func (p gfPoly) eval(x gfVal) gfVal { + out := gfConst(0) + for i := 0; i <= p.deg(); i++ { + x_i := x.pow(i) + p_i := p.index(i) + out = out.add(p_i.mul(x_i)) + } + return out +} + +// +// basic helpers for matrices in gf(2^8) +// + +type gfMat struct { + d gfVals + r, c int +} + +func matrixNew(i, j int) gfMat { + return gfMat{ + d: make(gfVals, i*j), + r: i, c: j, + } +} + +func (m gfMat) String() (out string) { + if m.r == 0 { + return "" + } + + for i := 0; i < m.r-1; i++ { + out += fmt.Sprintln(m.indexRow(i)) + } + out += fmt.Sprint(m.indexRow(m.r - 1)) + + return out +} + +func (m gfMat) index(i, j int) int { + return m.c*i + j +} + +func (m gfMat) get(i, j int) gfVal { + return m.d[m.index(i, j)] +} + +func (m gfMat) set(i, j int, val gfVal) { + m.d[m.index(i, j)] = val +} + +func (m gfMat) indexRow(i int) gfVals { + return m.d[m.index(i, 0):m.index(i+1, 0)] +} + +func (m gfMat) swapRow(i, j int) { + tmp := make(gfVals, m.r) + ri := m.indexRow(i) + rj := m.indexRow(j) + copy(tmp, ri) + copy(ri, rj) + copy(rj, tmp) +} + +func (m gfMat) scaleRow(i int, val gfVal) { + ri := m.indexRow(i) + for i := range ri { + ri[i] = ri[i].mul(val) + } +} + +func (m gfMat) addmulRow(i, j int, val gfVal) { + ri := m.indexRow(i) + rj := m.indexRow(j) + addmul(rj.unsafeBytes(), ri.unsafeBytes(), byte(val)) +} + +// in place invert. the output is put into a and m is turned into the identity +// matrix. a is expected to be the identity matrix. 
+func (m gfMat) invertWith(a gfMat) error { + for i := 0; i < m.r; i++ { + p_row, p_val := i, m.get(i, i) + for j := i + 1; j < m.r && p_val.isZero(); j++ { + p_row, p_val = j, m.get(j, i) + } + if p_val.isZero() { + continue + } + + if p_row != i { + m.swapRow(i, p_row) + a.swapRow(i, p_row) + } + + inv, err := p_val.inv() + if err != nil { + return err + } + m.scaleRow(i, inv) + a.scaleRow(i, inv) + + for j := i + 1; j < m.r; j++ { + leading := m.get(j, i) + m.addmulRow(i, j, leading) + a.addmulRow(i, j, leading) + } + } + + for i := m.r - 1; i > 0; i-- { + for j := i - 1; j >= 0; j-- { + trailing := m.get(j, i) + m.addmulRow(i, j, trailing) + a.addmulRow(i, j, trailing) + } + } + + return nil +} + +// in place standardize. +func (m gfMat) standardize() error { + for i := 0; i < m.r; i++ { + p_row, p_val := i, m.get(i, i) + for j := i + 1; j < m.r && p_val.isZero(); j++ { + p_row, p_val = j, m.get(j, i) + } + if p_val.isZero() { + continue + } + + if p_row != i { + m.swapRow(i, p_row) + } + + inv, err := p_val.inv() + if err != nil { + return err + } + m.scaleRow(i, inv) + + for j := i + 1; j < m.r; j++ { + leading := m.get(j, i) + m.addmulRow(i, j, leading) + } + } + + for i := m.r - 1; i > 0; i-- { + for j := i - 1; j >= 0; j-- { + trailing := m.get(j, i) + m.addmulRow(i, j, trailing) + } + } + + return nil +} + +// parity returns the new matrix because it changes dimensions and stuff. it +// can be done in place, but is easier to implement with a copy. +func (m gfMat) parity() gfMat { + // we assume m is in standard form already + // it is of form [I_r | P] + // our output will be [-P_transpose | I_(c - r)] + // but our field is of characteristic 2 so we do not need the negative. + + // In terms of m: + // I_r has r rows and r columns. + // P has r rows and c-r columns. + // P_transpose has c-r rows, and r columns. + // I_(c-r) has c-r rows and c-r columns. + // so: out.r == c-r, out.c == r + c - r == c + + out := matrixNew(m.c-m.r, m.c) + + // step 1. 
fill in the identity. it starts at column offset r. + for i := 0; i < m.c-m.r; i++ { + out.set(i, i+m.r, gfConst(1)) + } + + // step 2: fill in the transposed P matrix. i and j are in terms of out. + for i := 0; i < m.c-m.r; i++ { + for j := 0; j < m.r; j++ { + out.set(i, j, m.get(j, i+m.r)) + } + } + + return out +} diff --git a/vendor/github.com/vivint/infectious/math.go b/vendor/github.com/vivint/infectious/math.go new file mode 100644 index 000000000..d31c6bb0e --- /dev/null +++ b/vendor/github.com/vivint/infectious/math.go @@ -0,0 +1,179 @@ +// (C) 1996-1998 Luigi Rizzo (luigi@iet.unipi.it) +// 2009-2010 Jack Lloyd (lloyd@randombit.net) +// 2011 Billy Brumley (billy.brumley@aalto.fi) +// 2016-2017 Vivint, Inc. (jeff.wendling@vivint.com) +// +// Portions derived from code by Phil Karn (karn@ka9q.ampr.org), +// Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari +// Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +package infectious + +import "bytes" + +type pivotSearcher struct { + k int + ipiv []bool +} + +func newPivotSearcher(k int) *pivotSearcher { + return &pivotSearcher{ + k: k, + ipiv: make([]bool, k), + } +} + +func (p *pivotSearcher) search(col int, matrix []byte) (int, int, error) { + if p.ipiv[col] == false && matrix[col*p.k+col] != 0 { + p.ipiv[col] = true + return col, col, nil + } + + for row := 0; row < p.k; row++ { + if p.ipiv[row] { + continue + } + + for i := 0; i < p.k; i++ { + if p.ipiv[i] == false && matrix[row*p.k+i] != 0 { + p.ipiv[i] = true + return row, i, nil + } + } + } + + return 0, 0, Error.New("pivot not found") +} + +func swap(a, b *byte) { + tmp := *a + *a = *b + *b = tmp +} + +// TODO(jeff): matrix is a K*K array, row major. 
+func invertMatrix(matrix []byte, k int) error { + pivot_searcher := newPivotSearcher(k) + indxc := make([]int, k) + indxr := make([]int, k) + id_row := make([]byte, k) + + for col := 0; col < k; col++ { + icol, irow, err := pivot_searcher.search(col, matrix) + if err != nil { + return err + } + + if irow != icol { + for i := 0; i < k; i++ { + swap(&matrix[irow*k+i], &matrix[icol*k+i]) + } + } + + indxr[col] = irow + indxc[col] = icol + pivot_row := matrix[icol*k:][:k] + c := pivot_row[icol] + + if c == 0 { + return Error.New("singular matrix") + } + + if c != 1 { + c = gf_inverse[c] + pivot_row[icol] = 1 + mul_c := gf_mul_table[c][:] + + for i := 0; i < k; i++ { + pivot_row[i] = mul_c[pivot_row[i]] + } + } + + id_row[icol] = 1 + if !bytes.Equal(pivot_row, id_row) { + p := matrix + for i := 0; i < k; i++ { + if i != icol { + c = p[icol] + p[icol] = 0 + addmul(p[:k], pivot_row, c) + } + p = p[k:] + } + } + + id_row[icol] = 0 + } + + for i := 0; i < k; i++ { + if indxr[i] != indxc[i] { + for row := 0; row < k; row++ { + swap(&matrix[row*k+indxr[i]], &matrix[row*k+indxc[i]]) + } + } + } + return nil +} + +func createInvertedVdm(vdm []byte, k int) { + if k == 1 { + vdm[0] = 1 + return + } + + b := make([]byte, k) + c := make([]byte, k) + + c[k-1] = 0 + for i := 1; i < k; i++ { + mul_p_i := gf_mul_table[gf_exp[i]][:] + for j := k - 1 - (i - 1); j < k-1; j++ { + c[j] ^= mul_p_i[c[j+1]] + } + c[k-1] ^= gf_exp[i] + } + + for row := 0; row < k; row++ { + index := 0 + if row != 0 { + index = int(gf_exp[row]) + } + mul_p_row := gf_mul_table[index][:] + + t := byte(1) + b[k-1] = 1 + for i := k - 2; i >= 0; i-- { + b[i] = c[i+1] ^ mul_p_row[b[i+1]] + t = b[i] ^ mul_p_row[t] + } + + mul_t_inv := gf_mul_table[gf_inverse[t]][:] + for col := 0; col < k; col++ { + vdm[col*k+row] = mul_t_inv[b[col]] + } + } +} diff --git a/vendor/github.com/vivint/infectious/tables.go b/vendor/github.com/vivint/infectious/tables.go new file mode 100644 index 000000000..d382df7af --- /dev/null +++ 
b/vendor/github.com/vivint/infectious/tables.go @@ -0,0 +1,154 @@ +// (C) 1996-1998 Luigi Rizzo (luigi@iet.unipi.it) +// 2009-2010 Jack Lloyd (lloyd@randombit.net) +// 2011 Billy Brumley (billy.brumley@aalto.fi) +// 2016-2017 Vivint, Inc. (jeff.wendling@vivint.com) +// +// Portions derived from code by Phil Karn (karn@ka9q.ampr.org), +// Robert Morelos-Zaragoza (robert@spectra.eng.hawaii.edu) and Hari +// Thirumoorthy (harit@spectra.eng.hawaii.edu), Aug 1995 +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, +// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+ +package infectious + +var ( + gf_exp = [510]byte{ + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1D, 0x3A, 0x74, + 0xE8, 0xCD, 0x87, 0x13, 0x26, 0x4C, 0x98, 0x2D, 0x5A, 0xB4, 0x75, + 0xEA, 0xC9, 0x8F, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0x9D, + 0x27, 0x4E, 0x9C, 0x25, 0x4A, 0x94, 0x35, 0x6A, 0xD4, 0xB5, 0x77, + 0xEE, 0xC1, 0x9F, 0x23, 0x46, 0x8C, 0x05, 0x0A, 0x14, 0x28, 0x50, + 0xA0, 0x5D, 0xBA, 0x69, 0xD2, 0xB9, 0x6F, 0xDE, 0xA1, 0x5F, 0xBE, + 0x61, 0xC2, 0x99, 0x2F, 0x5E, 0xBC, 0x65, 0xCA, 0x89, 0x0F, 0x1E, + 0x3C, 0x78, 0xF0, 0xFD, 0xE7, 0xD3, 0xBB, 0x6B, 0xD6, 0xB1, 0x7F, + 0xFE, 0xE1, 0xDF, 0xA3, 0x5B, 0xB6, 0x71, 0xE2, 0xD9, 0xAF, 0x43, + 0x86, 0x11, 0x22, 0x44, 0x88, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xBD, + 0x67, 0xCE, 0x81, 0x1F, 0x3E, 0x7C, 0xF8, 0xED, 0xC7, 0x93, 0x3B, + 0x76, 0xEC, 0xC5, 0x97, 0x33, 0x66, 0xCC, 0x85, 0x17, 0x2E, 0x5C, + 0xB8, 0x6D, 0xDA, 0xA9, 0x4F, 0x9E, 0x21, 0x42, 0x84, 0x15, 0x2A, + 0x54, 0xA8, 0x4D, 0x9A, 0x29, 0x52, 0xA4, 0x55, 0xAA, 0x49, 0x92, + 0x39, 0x72, 0xE4, 0xD5, 0xB7, 0x73, 0xE6, 0xD1, 0xBF, 0x63, 0xC6, + 0x91, 0x3F, 0x7E, 0xFC, 0xE5, 0xD7, 0xB3, 0x7B, 0xF6, 0xF1, 0xFF, + 0xE3, 0xDB, 0xAB, 0x4B, 0x96, 0x31, 0x62, 0xC4, 0x95, 0x37, 0x6E, + 0xDC, 0xA5, 0x57, 0xAE, 0x41, 0x82, 0x19, 0x32, 0x64, 0xC8, 0x8D, + 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0xDD, 0xA7, 0x53, 0xA6, 0x51, + 0xA2, 0x59, 0xB2, 0x79, 0xF2, 0xF9, 0xEF, 0xC3, 0x9B, 0x2B, 0x56, + 0xAC, 0x45, 0x8A, 0x09, 0x12, 0x24, 0x48, 0x90, 0x3D, 0x7A, 0xF4, + 0xF5, 0xF7, 0xF3, 0xFB, 0xEB, 0xCB, 0x8B, 0x0B, 0x16, 0x2C, 0x58, + 0xB0, 0x7D, 0xFA, 0xE9, 0xCF, 0x83, 0x1B, 0x36, 0x6C, 0xD8, 0xAD, + 0x47, 0x8E, + + 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1D, 0x3A, 0x74, + 0xE8, 0xCD, 0x87, 0x13, 0x26, 0x4C, 0x98, 0x2D, 0x5A, 0xB4, 0x75, + 0xEA, 0xC9, 0x8F, 0x03, 0x06, 0x0C, 0x18, 0x30, 0x60, 0xC0, 0x9D, + 0x27, 0x4E, 0x9C, 0x25, 0x4A, 0x94, 0x35, 0x6A, 0xD4, 0xB5, 0x77, + 0xEE, 0xC1, 0x9F, 0x23, 0x46, 0x8C, 0x05, 0x0A, 0x14, 0x28, 0x50, + 0xA0, 0x5D, 0xBA, 0x69, 
0xD2, 0xB9, 0x6F, 0xDE, 0xA1, 0x5F, 0xBE, + 0x61, 0xC2, 0x99, 0x2F, 0x5E, 0xBC, 0x65, 0xCA, 0x89, 0x0F, 0x1E, + 0x3C, 0x78, 0xF0, 0xFD, 0xE7, 0xD3, 0xBB, 0x6B, 0xD6, 0xB1, 0x7F, + 0xFE, 0xE1, 0xDF, 0xA3, 0x5B, 0xB6, 0x71, 0xE2, 0xD9, 0xAF, 0x43, + 0x86, 0x11, 0x22, 0x44, 0x88, 0x0D, 0x1A, 0x34, 0x68, 0xD0, 0xBD, + 0x67, 0xCE, 0x81, 0x1F, 0x3E, 0x7C, 0xF8, 0xED, 0xC7, 0x93, 0x3B, + 0x76, 0xEC, 0xC5, 0x97, 0x33, 0x66, 0xCC, 0x85, 0x17, 0x2E, 0x5C, + 0xB8, 0x6D, 0xDA, 0xA9, 0x4F, 0x9E, 0x21, 0x42, 0x84, 0x15, 0x2A, + 0x54, 0xA8, 0x4D, 0x9A, 0x29, 0x52, 0xA4, 0x55, 0xAA, 0x49, 0x92, + 0x39, 0x72, 0xE4, 0xD5, 0xB7, 0x73, 0xE6, 0xD1, 0xBF, 0x63, 0xC6, + 0x91, 0x3F, 0x7E, 0xFC, 0xE5, 0xD7, 0xB3, 0x7B, 0xF6, 0xF1, 0xFF, + 0xE3, 0xDB, 0xAB, 0x4B, 0x96, 0x31, 0x62, 0xC4, 0x95, 0x37, 0x6E, + 0xDC, 0xA5, 0x57, 0xAE, 0x41, 0x82, 0x19, 0x32, 0x64, 0xC8, 0x8D, + 0x07, 0x0E, 0x1C, 0x38, 0x70, 0xE0, 0xDD, 0xA7, 0x53, 0xA6, 0x51, + 0xA2, 0x59, 0xB2, 0x79, 0xF2, 0xF9, 0xEF, 0xC3, 0x9B, 0x2B, 0x56, + 0xAC, 0x45, 0x8A, 0x09, 0x12, 0x24, 0x48, 0x90, 0x3D, 0x7A, 0xF4, + 0xF5, 0xF7, 0xF3, 0xFB, 0xEB, 0xCB, 0x8B, 0x0B, 0x16, 0x2C, 0x58, + 0xB0, 0x7D, 0xFA, 0xE9, 0xCF, 0x83, 0x1B, 0x36, 0x6C, 0xD8, 0xAD, + 0x47, 0x8E, + } + gf_log = [256]byte{ + 0xFF, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1A, 0xC6, 0x03, 0xDF, 0x33, + 0xEE, 0x1B, 0x68, 0xC7, 0x4B, 0x04, 0x64, 0xE0, 0x0E, 0x34, 0x8D, + 0xEF, 0x81, 0x1C, 0xC1, 0x69, 0xF8, 0xC8, 0x08, 0x4C, 0x71, 0x05, + 0x8A, 0x65, 0x2F, 0xE1, 0x24, 0x0F, 0x21, 0x35, 0x93, 0x8E, 0xDA, + 0xF0, 0x12, 0x82, 0x45, 0x1D, 0xB5, 0xC2, 0x7D, 0x6A, 0x27, 0xF9, + 0xB9, 0xC9, 0x9A, 0x09, 0x78, 0x4D, 0xE4, 0x72, 0xA6, 0x06, 0xBF, + 0x8B, 0x62, 0x66, 0xDD, 0x30, 0xFD, 0xE2, 0x98, 0x25, 0xB3, 0x10, + 0x91, 0x22, 0x88, 0x36, 0xD0, 0x94, 0xCE, 0x8F, 0x96, 0xDB, 0xBD, + 0xF1, 0xD2, 0x13, 0x5C, 0x83, 0x38, 0x46, 0x40, 0x1E, 0x42, 0xB6, + 0xA3, 0xC3, 0x48, 0x7E, 0x6E, 0x6B, 0x3A, 0x28, 0x54, 0xFA, 0x85, + 0xBA, 0x3D, 0xCA, 0x5E, 0x9B, 0x9F, 0x0A, 0x15, 0x79, 0x2B, 0x4E, + 0xD4, 0xE5, 
0xAC, 0x73, 0xF3, 0xA7, 0x57, 0x07, 0x70, 0xC0, 0xF7, + 0x8C, 0x80, 0x63, 0x0D, 0x67, 0x4A, 0xDE, 0xED, 0x31, 0xC5, 0xFE, + 0x18, 0xE3, 0xA5, 0x99, 0x77, 0x26, 0xB8, 0xB4, 0x7C, 0x11, 0x44, + 0x92, 0xD9, 0x23, 0x20, 0x89, 0x2E, 0x37, 0x3F, 0xD1, 0x5B, 0x95, + 0xBC, 0xCF, 0xCD, 0x90, 0x87, 0x97, 0xB2, 0xDC, 0xFC, 0xBE, 0x61, + 0xF2, 0x56, 0xD3, 0xAB, 0x14, 0x2A, 0x5D, 0x9E, 0x84, 0x3C, 0x39, + 0x53, 0x47, 0x6D, 0x41, 0xA2, 0x1F, 0x2D, 0x43, 0xD8, 0xB7, 0x7B, + 0xA4, 0x76, 0xC4, 0x17, 0x49, 0xEC, 0x7F, 0x0C, 0x6F, 0xF6, 0x6C, + 0xA1, 0x3B, 0x52, 0x29, 0x9D, 0x55, 0xAA, 0xFB, 0x60, 0x86, 0xB1, + 0xBB, 0xCC, 0x3E, 0x5A, 0xCB, 0x59, 0x5F, 0xB0, 0x9C, 0xA9, 0xA0, + 0x51, 0x0B, 0xF5, 0x16, 0xEB, 0x7A, 0x75, 0x2C, 0xD7, 0x4F, 0xAE, + 0xD5, 0xE9, 0xE6, 0xE7, 0xAD, 0xE8, 0x74, 0xD6, 0xF4, 0xEA, 0xA8, + 0x50, 0x58, 0xAF, + } + gf_inverse = [256]byte{ + 0x00, 0x01, 0x8E, 0xF4, 0x47, 0xA7, 0x7A, 0xBA, 0xAD, 0x9D, 0xDD, + 0x98, 0x3D, 0xAA, 0x5D, 0x96, 0xD8, 0x72, 0xC0, 0x58, 0xE0, 0x3E, + 0x4C, 0x66, 0x90, 0xDE, 0x55, 0x80, 0xA0, 0x83, 0x4B, 0x2A, 0x6C, + 0xED, 0x39, 0x51, 0x60, 0x56, 0x2C, 0x8A, 0x70, 0xD0, 0x1F, 0x4A, + 0x26, 0x8B, 0x33, 0x6E, 0x48, 0x89, 0x6F, 0x2E, 0xA4, 0xC3, 0x40, + 0x5E, 0x50, 0x22, 0xCF, 0xA9, 0xAB, 0x0C, 0x15, 0xE1, 0x36, 0x5F, + 0xF8, 0xD5, 0x92, 0x4E, 0xA6, 0x04, 0x30, 0x88, 0x2B, 0x1E, 0x16, + 0x67, 0x45, 0x93, 0x38, 0x23, 0x68, 0x8C, 0x81, 0x1A, 0x25, 0x61, + 0x13, 0xC1, 0xCB, 0x63, 0x97, 0x0E, 0x37, 0x41, 0x24, 0x57, 0xCA, + 0x5B, 0xB9, 0xC4, 0x17, 0x4D, 0x52, 0x8D, 0xEF, 0xB3, 0x20, 0xEC, + 0x2F, 0x32, 0x28, 0xD1, 0x11, 0xD9, 0xE9, 0xFB, 0xDA, 0x79, 0xDB, + 0x77, 0x06, 0xBB, 0x84, 0xCD, 0xFE, 0xFC, 0x1B, 0x54, 0xA1, 0x1D, + 0x7C, 0xCC, 0xE4, 0xB0, 0x49, 0x31, 0x27, 0x2D, 0x53, 0x69, 0x02, + 0xF5, 0x18, 0xDF, 0x44, 0x4F, 0x9B, 0xBC, 0x0F, 0x5C, 0x0B, 0xDC, + 0xBD, 0x94, 0xAC, 0x09, 0xC7, 0xA2, 0x1C, 0x82, 0x9F, 0xC6, 0x34, + 0xC2, 0x46, 0x05, 0xCE, 0x3B, 0x0D, 0x3C, 0x9C, 0x08, 0xBE, 0xB7, + 0x87, 0xE5, 0xEE, 0x6B, 0xEB, 0xF2, 0xBF, 0xAF, 0xC5, 
0x64, 0x07, + 0x7B, 0x95, 0x9A, 0xAE, 0xB6, 0x12, 0x59, 0xA5, 0x35, 0x65, 0xB8, + 0xA3, 0x9E, 0xD2, 0xF7, 0x62, 0x5A, 0x85, 0x7D, 0xA8, 0x3A, 0x29, + 0x71, 0xC8, 0xF6, 0xF9, 0x43, 0xD7, 0xD6, 0x10, 0x73, 0x76, 0x78, + 0x99, 0x0A, 0x19, 0x91, 0x14, 0x3F, 0xE6, 0xF0, 0x86, 0xB1, 0xE2, + 0xF1, 0xFA, 0x74, 0xF3, 0xB4, 0x6D, 0x21, 0xB2, 0x6A, 0xE3, 0xE7, + 0xB5, 0xEA, 0x03, 0x8F, 0xD3, 0xC9, 0x42, 0xD4, 0xE8, 0x75, 0x7F, + 0xFF, 0x7E, 0xFD, + } + gf_mul_table = [256][256]byte{} +) + +func init() { + for i := 0; i < 256; i++ { + for j := 0; j < 256; j++ { + log_i := int(gf_log[i]) + log_j := int(gf_log[j]) + gf_mul_table[i][j] = gf_exp[(log_i+log_j)%255] + } + } + for i := 0; i < 256; i++ { + gf_mul_table[0][i], gf_mul_table[i][0] = 0, 0 + } +} diff --git a/vendor/github.com/zeebo/errs/.gitignore b/vendor/github.com/zeebo/errs/.gitignore new file mode 100644 index 000000000..722d5e71d --- /dev/null +++ b/vendor/github.com/zeebo/errs/.gitignore @@ -0,0 +1 @@ +.vscode diff --git a/vendor/github.com/zeebo/errs/AUTHORS b/vendor/github.com/zeebo/errs/AUTHORS new file mode 100644 index 000000000..a970ee577 --- /dev/null +++ b/vendor/github.com/zeebo/errs/AUTHORS @@ -0,0 +1,4 @@ +Egon Elbre +Jeff Wendling +JT Olio +Kaloyan Raev \ No newline at end of file diff --git a/vendor/github.com/zeebo/errs/LICENSE b/vendor/github.com/zeebo/errs/LICENSE new file mode 100644 index 000000000..3ba91930e --- /dev/null +++ b/vendor/github.com/zeebo/errs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 The Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and 
this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zeebo/errs/README.md b/vendor/github.com/zeebo/errs/README.md new file mode 100644 index 000000000..be6bc0b40 --- /dev/null +++ b/vendor/github.com/zeebo/errs/README.md @@ -0,0 +1,235 @@ +# errs + +[![GoDoc](https://godoc.org/github.com/zeebo/errs?status.svg)](https://godoc.org/github.com/zeebo/errs) +[![Sourcegraph](https://sourcegraph.com/github.com/zeebo/errs/-/badge.svg)](https://sourcegraph.com/github.com/zeebo/errs?badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/zeebo/errs)](https://goreportcard.com/report/github.com/zeebo/errs) + +errs is a package for making errors friendly and easy. + +### Creating Errors + +The easiest way to use it, is to use the package level [New][New] function. +It's much like `fmt.Errorf`, but better. For example: + +```go +func checkThing() error { + return errs.New("what's up with %q?", "zeebo") +} +``` + +Why is it better? Errors come with a stack trace that is only printed +when a `"+"` character is used in the format string. This should retain the +benefits of being able to diagnose where and why errors happen, without all of +the noise of printing a stack trace in every situation. For example: + +```go +func doSomeRealWork() { + err := checkThing() + if err != nil { + fmt.Printf("%+v\n", err) // contains stack trace if it's a errs error. 
+ fmt.Printf("%v\n", err) // does not contain a stack trace + return + } +} +``` + +### Error Classes + +You can create a [Class][Class] of errors and check if any error was created by +that class. The class name is prefixed to all of the errors it creates. For example: + +```go +var Unauthorized = errs.Class("unauthorized") + +func checkUser(username, password string) error { + if username != "zeebo" { + return Unauthorized.New("who is %q?", username) + } + if password != "hunter2" { + return Unauthorized.New("that's not a good password, jerkmo!") + } + return nil +} + +func handleRequest() { + if err := checkUser("zeebo", "hunter3"); Unauthorized.Has(err) { + fmt.Println(err) + } + + // output: + // unauthorized: that's not a good password, jerkmo! +} +``` + +Classes can also [Wrap][ClassWrap] other errors, and errors may be wrapped +multiple times. For example: + +```go +var ( + Error = errs.Class("mypackage") + Unauthorized = errs.Class("unauthorized") +) + +func deep3() error { + return fmt.Errorf("ouch") +} + +func deep2() error { + return Unauthorized.Wrap(deep3()) +} + +func deep1() error { + return Error.Wrap(deep2()) +} + +func deep() { + fmt.Println(deep1()) + + // output: + // mypackage: unauthorized: ouch +} +``` + +In the above example, both `Error.Has(deep1())` and `Unauthorized.Has(deep1())` +would return `true`, and the stack trace would only be recorded once at the +`deep2` call. + +In addition, when an error has been wrapped, wrapping it again with the same class will +not do anything. For example: + +```go +func doubleWrap() { + fmt.Println(Error.Wrap(Error.New("foo"))) + + // output: + // mypackage: foo +} +``` + +This is to make it an easier decision if you should wrap or not (you should). + +### Utilities + +[Classes][Classes] is a helper function to get a slice of classes that an error +has. The latest wrap is first in the slice. 
For example: + +```go +func getClasses() { + classes := errs.Classes(deep1()) + fmt.Println(classes[0] == &Error) + fmt.Println(classes[1] == &Unauthorized) + + // output: + // true + // true +} +``` + +Finally, a helper function, [Unwrap][Unwrap] is provided to get the +wrapped error in cases where you might want to inspect details. For +example: + +```go +var Error = errs.Class("mypackage") + +func getHandle() (*os.File, error) { + fh, err := os.Open("neat_things") + if err != nil { + return nil, Error.Wrap(err) + } + return fh, nil +} + +func checkForNeatThings() { + fh, err := getHandle() + if os.IsNotExist(errs.Unwrap(err)) { + panic("no neat things?!") + } + if err != nil { + panic("phew, at least there are neat things, even if i can't see them") + } + fh.Close() +} +``` + +It knows about both the `Cause() error` and `Unwrap() error` methods that are +often used in the community, and will call them as many times as possible. + +### Defer + +The package also provides [WrapP][WrapP] versions of [Wrap][Wrap] that are useful +in defer contexts. For example: + +```go +func checkDefer() (err error) { + defer Error.WrapP(&err) + + fh, err := os.Open("secret_stash") + if err != nil { + return nil, err + } + return fh.Close() +} +``` + +### Groups + +[Groups][Group] allow one to collect a set of errors. For example: + +```go +func tonsOfErrors() error { + var group errs.Group + for _, work := range someWork { + group.Add(maybeErrors(work)) + } + return group.Err() +} +``` + +Some things to note: + +- The [Add][GroupAdd] method only adds to the group if the passed in error is non-nil. +- The [Err][GroupErr] method returns an error only if non-nil errors have been added, and + additionally returns just the error if only one error was added. Thus, we always + have that if you only call `group.Add(err)`, then `group.Err() == err`. 
+ +The returned error will format itself similarly: + +```go +func groupFormat() { + var group errs.Group + group.Add(errs.New("first")) + group.Add(errs.New("second")) + err := group.Err() + + fmt.Printf("%v\n", err) + fmt.Println() + fmt.Printf("%+v\n", err) + + // output: + // first; second + // + // group: + // --- first + // ... stack trace + // --- second + // ... stack trace +} +``` + +### Contributing + +errs is released under an MIT License. If you want to contribute, be sure to +add yourself to the list in AUTHORS. + +[New]: https://godoc.org/github.com/zeebo/errs#New +[Wrap]: https://godoc.org/github.com/zeebo/errs#Wrap +[WrapP]: https://godoc.org/github.com/zeebo/errs#WrapP +[Class]: https://godoc.org/github.com/zeebo/errs#Class +[ClassNew]: https://godoc.org/github.com/zeebo/errs#Class.New +[ClassWrap]: https://godoc.org/github.com/zeebo/errs#Class.Wrap +[Unwrap]: https://godoc.org/github.com/zeebo/errs#Unwrap +[Classes]: https://godoc.org/github.com/zeebo/errs#Classes +[Group]: https://godoc.org/github.com/zeebo/errs#Group +[GroupAdd]: https://godoc.org/github.com/zeebo/errs#Group.Add +[GroupErr]: https://godoc.org/github.com/zeebo/errs#Group.Err diff --git a/vendor/github.com/zeebo/errs/errs.go b/vendor/github.com/zeebo/errs/errs.go new file mode 100644 index 000000000..0705bac4a --- /dev/null +++ b/vendor/github.com/zeebo/errs/errs.go @@ -0,0 +1,296 @@ +// Package errs provides a simple error package with stack traces. +package errs + +import ( + "fmt" + "io" + "runtime" +) + +// Namer is implemented by all errors returned in this package. It returns a +// name for the class of error it is, and a boolean indicating if the name is +// valid. +type Namer interface{ Name() (string, bool) } + +// Causer is implemented by all errors returned in this package. It returns +// the underlying cause of the error, or nil if there is no underlying cause. 
+type Causer interface{ Cause() error } + +// unwrapper is implemented by all errors returned in this package. It returns +// the underlying cause of the error, or nil if there is no underlying error. +type unwrapper interface{ Unwrap() error } + +// ungrouper is implemented by combinedError returned in this package. It +// returns all underlying errors, or nil if there is no underlying error. +type ungrouper interface{ Ungroup() []error } + +// New returns an error not contained in any class. This is the same as calling +// fmt.Errorf(...) except it captures a stack trace on creation. +func New(format string, args ...interface{}) error { + return (*Class).create(nil, 3, fmt.Errorf(format, args...)) +} + +// Wrap returns an error not contained in any class. It just associates a stack +// trace with the error. Wrap returns nil if err is nil. +func Wrap(err error) error { + return (*Class).create(nil, 3, err) +} + +// WrapP stores into the error pointer if it contains a non-nil error an error not +// contained in any class. It just associates a stack trace with the error. WrapP +// does nothing if the pointer or pointed at error is nil. +func WrapP(err *error) { + if err != nil && *err != nil { + *err = (*Class).create(nil, 3, *err) + } +} + +// Often, we call Cause as much as possible. Since comparing arbitrary +// interfaces with equality isn't panic safe, we only loop up to 100 +// times to ensure that a poor implementation that causes a cycle does +// not run forever. +const maxCause = 100 + +// Unwrap returns the underlying error, if any, or just the error. +func Unwrap(err error) error { + for i := 0; err != nil && i < maxCause; i++ { + var nerr error + + switch e := err.(type) { + case Causer: + nerr = e.Cause() + + case unwrapper: + nerr = e.Unwrap() + } + + if nerr == nil { + return err + } + err = nerr + } + + return err +} + +// Classes returns all the classes that have wrapped the error. 
+func Classes(err error) (classes []*Class) { + causes := 0 + for { + switch e := err.(type) { + case *errorT: + if e.class != nil { + classes = append(classes, e.class) + } + err = e.err + continue + + case Causer: + err = e.Cause() + + case unwrapper: + err = e.Unwrap() + + default: + return classes + } + + if causes >= maxCause { + return classes + } + causes++ + } +} + +// Is checks if any of the underlying errors matches target +func Is(err, target error) bool { + return IsFunc(err, func(err error) bool { + return err == target + }) +} + +// IsFunc checks if any of the underlying errors matches the func +func IsFunc(err error, is func(err error) bool) bool { + causes := 0 + errs := []error{err} + + for len(errs) > 0 { + var next []error + for _, err := range errs { + if is(err) { + return true + } + + switch e := err.(type) { + case ungrouper: + ungrouped := e.Ungroup() + for _, unerr := range ungrouped { + if unerr != nil { + next = append(next, unerr) + } + } + case Causer: + cause := e.Cause() + if cause != nil { + next = append(next, cause) + } + case unwrapper: + unwrapped := e.Unwrap() + if unwrapped != nil { + next = append(next, unwrapped) + } + } + + if causes >= maxCause { + return false + } + causes++ + } + errs = next + } + + return false +} + +// +// error classes +// + +// Class represents a class of errors. You can construct errors, and check if +// errors are part of the class. +type Class string + +// Has returns true if the passed in error was wrapped by this class. +func (c *Class) Has(err error) bool { + for { + errt, ok := err.(*errorT) + if !ok { + return false + } + if errt.class == c { + return true + } + err = errt.err + } +} + +// New constructs an error with the format string that will be contained by +// this class. This is the same as calling Wrap(fmt.Errorf(...)). 
+func (c *Class) New(format string, args ...interface{}) error { + return c.create(3, fmt.Errorf(format, args...)) +} + +// Wrap returns a new error based on the passed in error that is contained in +// this class. Wrap returns nil if err is nil. +func (c *Class) Wrap(err error) error { + return c.create(3, err) +} + +// WrapP stores into the error pointer if it contains a non-nil error an error contained +// in this class. WrapP does nothing if the pointer or pointed at error is nil. +func (c *Class) WrapP(err *error) { + if err != nil && *err != nil { + *err = c.create(3, *err) + } +} + +// create constructs the error, or just adds the class to the error, keeping +// track of the stack if it needs to construct it. +func (c *Class) create(depth int, err error) error { + if err == nil { + return nil + } + + var pcs []uintptr + if err, ok := err.(*errorT); ok { + if c == nil || err.class == c { + return err + } + pcs = err.pcs + } + + errt := &errorT{ + class: c, + err: err, + pcs: pcs, + } + + if errt.pcs == nil { + errt.pcs = make([]uintptr, 64) + n := runtime.Callers(depth, errt.pcs) + errt.pcs = errt.pcs[:n:n] + } + + return errt +} + +// +// errors +// + +// errorT is the type of errors returned from this package. +type errorT struct { + class *Class + err error + pcs []uintptr +} + +var ( // ensure *errorT implements the helper interfaces. + _ Namer = (*errorT)(nil) + _ Causer = (*errorT)(nil) + _ error = (*errorT)(nil) +) + +// errorT implements the error interface. +func (e *errorT) Error() string { + return fmt.Sprintf("%v", e) +} + +// Format handles the formatting of the error. Using a "+" on the format string +// specifier will also write the stack trace. 
+func (e *errorT) Format(f fmt.State, c rune) { + sep := "" + if e.class != nil && *e.class != "" { + fmt.Fprintf(f, "%s", string(*e.class)) + sep = ": " + } + if text := e.err.Error(); len(text) > 0 { + fmt.Fprintf(f, "%s%v", sep, text) + } + if f.Flag(int('+')) { + summarizeStack(f, e.pcs) + } +} + +// Cause implements the interface wrapping errors are expected to implement +// to allow getting at underlying causes. +func (e *errorT) Cause() error { + return e.err +} + +// Unwrap implements the draft design for error inspection. Since this is +// on an unexported type, it should not be hard to maintain going forward +// given that it also is the exact same semantics as Cause. +func (e *errorT) Unwrap() error { + return e.err +} + +// Name returns the name for the error, which is the first wrapping class. +func (e *errorT) Name() (string, bool) { + if e.class == nil { + return "", false + } + return string(*e.class), true +} + +// summarizeStack writes stack line entries to the writer. +func summarizeStack(w io.Writer, pcs []uintptr) { + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + if !more { + return + } + fmt.Fprintf(w, "\n\t%s:%d", frame.Function, frame.Line) + } +} diff --git a/vendor/github.com/zeebo/errs/go.mod b/vendor/github.com/zeebo/errs/go.mod new file mode 100644 index 000000000..884772d2a --- /dev/null +++ b/vendor/github.com/zeebo/errs/go.mod @@ -0,0 +1,3 @@ +module github.com/zeebo/errs + +go 1.12 diff --git a/vendor/github.com/zeebo/errs/group.go b/vendor/github.com/zeebo/errs/group.go new file mode 100644 index 000000000..e5997ec55 --- /dev/null +++ b/vendor/github.com/zeebo/errs/group.go @@ -0,0 +1,100 @@ +package errs + +import ( + "fmt" + "io" +) + +// Group is a list of errors. +type Group []error + +// Combine combines multiple non-empty errors into a single error. +func Combine(errs ...error) error { + var group Group + group.Add(errs...) + return group.Err() +} + +// Add adds non-empty errors to the Group. 
+func (group *Group) Add(errs ...error) { + for _, err := range errs { + if err != nil { + *group = append(*group, err) + } + } +} + +// Err returns an error containing all of the non-nil errors. +// If there was only one error, it will return it. +// If there were none, it returns nil. +func (group Group) Err() error { + sanitized := group.sanitize() + if len(sanitized) == 0 { + return nil + } + if len(sanitized) == 1 { + return sanitized[0] + } + return combinedError(sanitized) +} + +// sanitize returns group that doesn't contain nil-s +func (group Group) sanitize() Group { + // sanity check for non-nil errors + for i, err := range group { + if err == nil { + sanitized := make(Group, 0, len(group)-1) + sanitized = append(sanitized, group[:i]...) + sanitized.Add(group[i+1:]...) + return sanitized + } + } + + return group +} + +// combinedError is a list of non-empty errors +type combinedError []error + +// Cause returns the first error. +func (group combinedError) Cause() error { + if len(group) > 0 { + return group[0] + } + return nil +} + +// Unwrap returns the first error. +func (group combinedError) Unwrap() error { + return group.Cause() +} + +// Ungroup returns all errors. +func (group combinedError) Ungroup() []error { + return group +} + +// Error returns error string delimited by semicolons. +func (group combinedError) Error() string { return fmt.Sprintf("%v", group) } + +// Format handles the formatting of the error. Using a "+" on the format +// string specifier will cause the errors to be formatted with "+" and +// delimited by newlines. They are delimited by semicolons otherwise. 
+func (group combinedError) Format(f fmt.State, c rune) { + delim := "; " + if f.Flag(int('+')) { + io.WriteString(f, "group:\n--- ") + delim = "\n--- " + } + + for i, err := range group { + if i != 0 { + io.WriteString(f, delim) + } + if formatter, ok := err.(fmt.Formatter); ok { + formatter.Format(f, c) + } else { + fmt.Fprintf(f, "%v", err) + } + } +} diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/atomic/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore new file mode 100644 index 000000000..0a4504f11 --- /dev/null +++ b/vendor/go.uber.org/atomic/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +/vendor +/cover +cover.out +lint.log + +# Binaries +*.test + +# Profiling output +*.prof diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml new file mode 100644 index 000000000..0f3769e5f --- /dev/null +++ b/vendor/go.uber.org/atomic/.travis.yml @@ -0,0 +1,27 @@ +sudo: false +language: go +go_import_path: go.uber.org/atomic + +go: + - 1.11.x + - 1.12.x + +matrix: + include: + - go: 1.12.x + env: NO_TEST=yes LINT=yes + +cache: + directories: + - vendor + +install: + - make install_ci + +script: + - test -n "$NO_TEST" || make test_ci + - test 
-n "$NO_TEST" || scripts/test-ubergo.sh + - test -z "$LINT" || make install_lint lint + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt new file mode 100644 index 000000000..8765c9fbc --- /dev/null +++ b/vendor/go.uber.org/atomic/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile new file mode 100644 index 000000000..1ef263075 --- /dev/null +++ b/vendor/go.uber.org/atomic/Makefile @@ -0,0 +1,51 @@ +# Many Go tools take file globs or directories as arguments instead of packages. +PACKAGE_FILES ?= *.go + +# For pre go1.6 +export GO15VENDOREXPERIMENT=1 + + +.PHONY: build +build: + go build -i ./... 
+ + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: test +test: + go test -cover -race ./... + + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: install_lint +install_lint: + go get golang.org/x/lint/golint + + +.PHONY: lint +lint: + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PACKAGE_FILES) 2>&1 | tee lint.log + @echo "Checking vet..." + @go vet ./... 2>&1 | tee -a lint.log;) + @echo "Checking lint..." + @golint $$(go list ./...) 2>&1 | tee -a lint.log + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @[ ! -s lint.log ] + + +.PHONY: test_ci +test_ci: install_ci build + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md new file mode 100644 index 000000000..62eb8e576 --- /dev/null +++ b/vendor/go.uber.org/atomic/README.md @@ -0,0 +1,36 @@ +# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard] + +Simple wrappers for primitive types to enforce atomic access. + +## Installation +`go get -u go.uber.org/atomic` + +## Usage +The standard library's `sync/atomic` is powerful, but it's easy to forget which +variables must be accessed atomically. `go.uber.org/atomic` preserves all the +functionality of the standard library, but wraps the primitive types to +provide a safer, more convenient API. + +```go +var atom atomic.Uint32 +atom.Store(42) +atom.Sub(2) +atom.CAS(40, 11) +``` + +See the [documentation][doc] for a complete API specification. + +## Development Status +Stable. + +___ +Released under the [MIT License](LICENSE.txt). 
+ +[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg +[doc]: https://godoc.org/go.uber.org/atomic +[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master +[ci]: https://travis-ci.com/uber-go/atomic +[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/atomic +[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic +[reportcard]: https://goreportcard.com/report/go.uber.org/atomic diff --git a/vendor/go.uber.org/atomic/atomic.go b/vendor/go.uber.org/atomic/atomic.go new file mode 100644 index 000000000..1db6849fc --- /dev/null +++ b/vendor/go.uber.org/atomic/atomic.go @@ -0,0 +1,351 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package atomic provides simple wrappers around numerics to enforce atomic +// access. 
+package atomic + +import ( + "math" + "sync/atomic" + "time" +) + +// Int32 is an atomic wrapper around an int32. +type Int32 struct{ v int32 } + +// NewInt32 creates an Int32. +func NewInt32(i int32) *Int32 { + return &Int32{i} +} + +// Load atomically loads the wrapped value. +func (i *Int32) Load() int32 { + return atomic.LoadInt32(&i.v) +} + +// Add atomically adds to the wrapped int32 and returns the new value. +func (i *Int32) Add(n int32) int32 { + return atomic.AddInt32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int32 and returns the new value. +func (i *Int32) Sub(n int32) int32 { + return atomic.AddInt32(&i.v, -n) +} + +// Inc atomically increments the wrapped int32 and returns the new value. +func (i *Int32) Inc() int32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Int32) Dec() int32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int32) CAS(old, new int32) bool { + return atomic.CompareAndSwapInt32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int32) Store(n int32) { + atomic.StoreInt32(&i.v, n) +} + +// Swap atomically swaps the wrapped int32 and returns the old value. +func (i *Int32) Swap(n int32) int32 { + return atomic.SwapInt32(&i.v, n) +} + +// Int64 is an atomic wrapper around an int64. +type Int64 struct{ v int64 } + +// NewInt64 creates an Int64. +func NewInt64(i int64) *Int64 { + return &Int64{i} +} + +// Load atomically loads the wrapped value. +func (i *Int64) Load() int64 { + return atomic.LoadInt64(&i.v) +} + +// Add atomically adds to the wrapped int64 and returns the new value. +func (i *Int64) Add(n int64) int64 { + return atomic.AddInt64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped int64 and returns the new value. +func (i *Int64) Sub(n int64) int64 { + return atomic.AddInt64(&i.v, -n) +} + +// Inc atomically increments the wrapped int64 and returns the new value. 
+func (i *Int64) Inc() int64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int64 and returns the new value. +func (i *Int64) Dec() int64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Int64) CAS(old, new int64) bool { + return atomic.CompareAndSwapInt64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Int64) Store(n int64) { + atomic.StoreInt64(&i.v, n) +} + +// Swap atomically swaps the wrapped int64 and returns the old value. +func (i *Int64) Swap(n int64) int64 { + return atomic.SwapInt64(&i.v, n) +} + +// Uint32 is an atomic wrapper around an uint32. +type Uint32 struct{ v uint32 } + +// NewUint32 creates a Uint32. +func NewUint32(i uint32) *Uint32 { + return &Uint32{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint32) Load() uint32 { + return atomic.LoadUint32(&i.v) +} + +// Add atomically adds to the wrapped uint32 and returns the new value. +func (i *Uint32) Add(n uint32) uint32 { + return atomic.AddUint32(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint32 and returns the new value. +func (i *Uint32) Sub(n uint32) uint32 { + return atomic.AddUint32(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint32 and returns the new value. +func (i *Uint32) Inc() uint32 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped int32 and returns the new value. +func (i *Uint32) Dec() uint32 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint32) CAS(old, new uint32) bool { + return atomic.CompareAndSwapUint32(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint32) Store(n uint32) { + atomic.StoreUint32(&i.v, n) +} + +// Swap atomically swaps the wrapped uint32 and returns the old value. +func (i *Uint32) Swap(n uint32) uint32 { + return atomic.SwapUint32(&i.v, n) +} + +// Uint64 is an atomic wrapper around a uint64. +type Uint64 struct{ v uint64 } + +// NewUint64 creates a Uint64. 
+func NewUint64(i uint64) *Uint64 { + return &Uint64{i} +} + +// Load atomically loads the wrapped value. +func (i *Uint64) Load() uint64 { + return atomic.LoadUint64(&i.v) +} + +// Add atomically adds to the wrapped uint64 and returns the new value. +func (i *Uint64) Add(n uint64) uint64 { + return atomic.AddUint64(&i.v, n) +} + +// Sub atomically subtracts from the wrapped uint64 and returns the new value. +func (i *Uint64) Sub(n uint64) uint64 { + return atomic.AddUint64(&i.v, ^(n - 1)) +} + +// Inc atomically increments the wrapped uint64 and returns the new value. +func (i *Uint64) Inc() uint64 { + return i.Add(1) +} + +// Dec atomically decrements the wrapped uint64 and returns the new value. +func (i *Uint64) Dec() uint64 { + return i.Sub(1) +} + +// CAS is an atomic compare-and-swap. +func (i *Uint64) CAS(old, new uint64) bool { + return atomic.CompareAndSwapUint64(&i.v, old, new) +} + +// Store atomically stores the passed value. +func (i *Uint64) Store(n uint64) { + atomic.StoreUint64(&i.v, n) +} + +// Swap atomically swaps the wrapped uint64 and returns the old value. +func (i *Uint64) Swap(n uint64) uint64 { + return atomic.SwapUint64(&i.v, n) +} + +// Bool is an atomic Boolean. +type Bool struct{ v uint32 } + +// NewBool creates a Bool. +func NewBool(initial bool) *Bool { + return &Bool{boolToInt(initial)} +} + +// Load atomically loads the Boolean. +func (b *Bool) Load() bool { + return truthy(atomic.LoadUint32(&b.v)) +} + +// CAS is an atomic compare-and-swap. +func (b *Bool) CAS(old, new bool) bool { + return atomic.CompareAndSwapUint32(&b.v, boolToInt(old), boolToInt(new)) +} + +// Store atomically stores the passed value. +func (b *Bool) Store(new bool) { + atomic.StoreUint32(&b.v, boolToInt(new)) +} + +// Swap sets the given value and returns the previous value. +func (b *Bool) Swap(new bool) bool { + return truthy(atomic.SwapUint32(&b.v, boolToInt(new))) +} + +// Toggle atomically negates the Boolean and returns the previous value. 
+func (b *Bool) Toggle() bool { + return truthy(atomic.AddUint32(&b.v, 1) - 1) +} + +func truthy(n uint32) bool { + return n&1 == 1 +} + +func boolToInt(b bool) uint32 { + if b { + return 1 + } + return 0 +} + +// Float64 is an atomic wrapper around float64. +type Float64 struct { + v uint64 +} + +// NewFloat64 creates a Float64. +func NewFloat64(f float64) *Float64 { + return &Float64{math.Float64bits(f)} +} + +// Load atomically loads the wrapped value. +func (f *Float64) Load() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.v)) +} + +// Store atomically stores the passed value. +func (f *Float64) Store(s float64) { + atomic.StoreUint64(&f.v, math.Float64bits(s)) +} + +// Add atomically adds to the wrapped float64 and returns the new value. +func (f *Float64) Add(s float64) float64 { + for { + old := f.Load() + new := old + s + if f.CAS(old, new) { + return new + } + } +} + +// Sub atomically subtracts from the wrapped float64 and returns the new value. +func (f *Float64) Sub(s float64) float64 { + return f.Add(-s) +} + +// CAS is an atomic compare-and-swap. +func (f *Float64) CAS(old, new float64) bool { + return atomic.CompareAndSwapUint64(&f.v, math.Float64bits(old), math.Float64bits(new)) +} + +// Duration is an atomic wrapper around time.Duration +// https://godoc.org/time#Duration +type Duration struct { + v Int64 +} + +// NewDuration creates a Duration. +func NewDuration(d time.Duration) *Duration { + return &Duration{v: *NewInt64(int64(d))} +} + +// Load atomically loads the wrapped value. +func (d *Duration) Load() time.Duration { + return time.Duration(d.v.Load()) +} + +// Store atomically stores the passed value. +func (d *Duration) Store(n time.Duration) { + d.v.Store(int64(n)) +} + +// Add atomically adds to the wrapped time.Duration and returns the new value. 
+func (d *Duration) Add(n time.Duration) time.Duration { + return time.Duration(d.v.Add(int64(n))) +} + +// Sub atomically subtracts from the wrapped time.Duration and returns the new value. +func (d *Duration) Sub(n time.Duration) time.Duration { + return time.Duration(d.v.Sub(int64(n))) +} + +// Swap atomically swaps the wrapped time.Duration and returns the old value. +func (d *Duration) Swap(n time.Duration) time.Duration { + return time.Duration(d.v.Swap(int64(n))) +} + +// CAS is an atomic compare-and-swap. +func (d *Duration) CAS(old, new time.Duration) bool { + return d.v.CAS(int64(old), int64(new)) +} + +// Value shadows the type of the same name from sync/atomic +// https://godoc.org/sync/atomic#Value +type Value struct{ atomic.Value } diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go new file mode 100644 index 000000000..0489d19ba --- /dev/null +++ b/vendor/go.uber.org/atomic/error.go @@ -0,0 +1,55 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// Error is an atomic type-safe wrapper around Value for errors +type Error struct{ v Value } + +// errorHolder is non-nil holder for error object. +// atomic.Value panics on saving nil object, so err object needs to be +// wrapped with valid object first. +type errorHolder struct{ err error } + +// NewError creates new atomic error object +func NewError(err error) *Error { + e := &Error{} + if err != nil { + e.Store(err) + } + return e +} + +// Load atomically loads the wrapped error +func (e *Error) Load() error { + v := e.v.Load() + if v == nil { + return nil + } + + eh := v.(errorHolder) + return eh.err +} + +// Store atomically stores error. +// NOTE: a holder object is allocated on each Store call. 
+func (e *Error) Store(err error) { + e.v.Store(errorHolder{err: err}) +} diff --git a/vendor/go.uber.org/atomic/glide.lock b/vendor/go.uber.org/atomic/glide.lock new file mode 100644 index 000000000..3c72c5997 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.lock @@ -0,0 +1,17 @@ +hash: f14d51408e3e0e4f73b34e4039484c78059cd7fc5f4996fdd73db20dc8d24f53 +updated: 2016-10-27T00:10:51.16960137-07:00 +imports: [] +testImports: +- name: github.com/davecgh/go-spew + version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: d77da356e56a7428ad25149ca77381849a6a5232 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/glide.yaml b/vendor/go.uber.org/atomic/glide.yaml new file mode 100644 index 000000000..4cf608ec0 --- /dev/null +++ b/vendor/go.uber.org/atomic/glide.yaml @@ -0,0 +1,6 @@ +package: go.uber.org/atomic +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go new file mode 100644 index 000000000..ede8136fa --- /dev/null +++ b/vendor/go.uber.org/atomic/string.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package atomic + +// String is an atomic type-safe wrapper around Value for strings. +type String struct{ v Value } + +// NewString creates a String. +func NewString(str string) *String { + s := &String{} + if str != "" { + s.Store(str) + } + return s +} + +// Load atomically loads the wrapped string. +func (s *String) Load() string { + v := s.v.Load() + if v == nil { + return "" + } + return v.(string) +} + +// Store atomically stores the passed string. +// Note: Converting the string to an interface{} to store in the Value +// requires an allocation. 
+func (s *String) Store(str string) { + s.v.Store(str) +} diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 000000000..61ead8666 --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1 @@ +/vendor diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml new file mode 100644 index 000000000..5ffa8fed4 --- /dev/null +++ b/vendor/go.uber.org/multierr/.travis.yml @@ -0,0 +1,33 @@ +sudo: false +language: go +go_import_path: go.uber.org/multierr + +env: + global: + - GO15VENDOREXPERIMENT=1 + +go: + - 1.7 + - 1.8 + - tip + +cache: + directories: + - vendor + +before_install: +- go version + +install: +- | + set -e + make install_ci + +script: +- | + set -e + make lint + make test_ci + +after_success: +- bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 000000000..898445d06 --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,28 @@ +Releases +======== + +v1.1.0 (2017-06-30) +=================== + +- 
Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. + + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..858e02475 --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 000000000..a7437d061 --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,74 @@ +export GO15VENDOREXPERIMENT=1 + +PACKAGES := $(shell glide nv) + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + +.PHONY: build +build: + go build -i $(PACKAGES) + +.PHONY: test +test: + go test -cover -race $(PACKAGES) + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: govet +govet: + $(eval VET_LOG := $(shell mktemp -t govet.XXXXX)) + @go vet $(PACKAGES) 2>&1 \ + | grep -v '^exit status' > $(VET_LOG) || true + @[ ! -s "$(VET_LOG)" ] || (echo "govet failed:" | cat - $(VET_LOG) && false) + +.PHONY: golint +golint: + @go get github.com/golang/lint/golint + $(eval LINT_LOG := $(shell mktemp -t golint.XXXXX)) + @cat /dev/null > $(LINT_LOG) + @$(foreach pkg, $(PACKAGES), golint $(pkg) >> $(LINT_LOG) || true;) + @[ ! -s "$(LINT_LOG)" ] || (echo "golint failed:" | cat - $(LINT_LOG) && false) + +.PHONY: staticcheck +staticcheck: + @go get honnef.co/go/tools/cmd/staticcheck + $(eval STATICCHECK_LOG := $(shell mktemp -t staticcheck.XXXXX)) + @staticcheck $(PACKAGES) 2>&1 > $(STATICCHECK_LOG) || true + @[ ! 
-s "$(STATICCHECK_LOG)" ] || (echo "staticcheck failed:" | cat - $(STATICCHECK_LOG) && false) + +.PHONY: lint +lint: gofmt govet golint staticcheck + +.PHONY: cover +cover: + ./scripts/cover.sh $(shell go list $(PACKAGES)) + go tool cover -html=cover.out -o cover.html + +update-license: + @go get go.uber.org/tools/update-license + @update-license \ + $(shell go list -json $(PACKAGES) | \ + jq -r '.Dir + "/" + (.GoFiles | .[])') + +############################################################################## + +.PHONY: install_ci +install_ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + +.PHONY: test_ci +test_ci: install_ci + ./scripts/cover.sh $(shell go list $(PACKAGES)) diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 000000000..065088f64 --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,23 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Installation + + go get -u go.uber.org/multierr + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. 
+ +[MIT License]: LICENSE.txt +[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg +[doc]: https://godoc.org/go.uber.org/multierr +[ci-img]: https://travis-ci.org/uber-go/multierr.svg?branch=master +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://travis-ci.org/uber-go/multierr +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..de6ce4736 --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,401 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// Overview +// +// Errors can be combined with the use of the Combine function. 
+// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Combine(reader.Close(), writer.Close()) +// +// This makes it possible to record resource cleanup failures from deferred +// blocks with the help of named return values. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:") +// } +// +// Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "fmt" + "io" + "strings" + "sync" + + "go.uber.org/atomic" +) + +var ( + // Separator for single-line error messages. 
+ _singlelineSeparator = []byte("; ") + + _newline = []byte("\n") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, the returned slice is empty. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. +func Errors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. + eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + errors := eg.Errors() + result := make([]error, len(errors)) + copy(result, errors) + return result +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. 
+// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +var _ errorGroup = (*multiError)(nil) + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. 
+ FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // already flat + return &multiError{errors: errors} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. 
+// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. 
+ errors := [2]error{left, right} + return fromSlice(errors[0:]) +} diff --git a/vendor/go.uber.org/multierr/glide.lock b/vendor/go.uber.org/multierr/glide.lock new file mode 100644 index 000000000..f9ea94c33 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.lock @@ -0,0 +1,19 @@ +hash: b53b5e9a84b9cb3cc4b2d0499e23da2feca1eec318ce9bb717ecf35bf24bf221 +updated: 2017-04-10T13:34:45.671678062-07:00 +imports: +- name: go.uber.org/atomic + version: 3b8db5e93c4c02efbc313e17b2e796b0914a01fb +testImports: +- name: github.com/davecgh/go-spew + version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + subpackages: + - spew +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/stretchr/testify + version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + subpackages: + - assert + - require diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml new file mode 100644 index 000000000..6ef084ec2 --- /dev/null +++ b/vendor/go.uber.org/multierr/glide.yaml @@ -0,0 +1,8 @@ +package: go.uber.org/multierr +import: +- package: go.uber.org/atomic + version: ^1 +testImport: +- package: github.com/stretchr/testify + subpackages: + - assert diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 000000000..8e5ca7d3e --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + 
if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 000000000..08fbde6ce --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 000000000..c6440db8e --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,108 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. 
+ zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. 
That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/.travis.yml b/vendor/go.uber.org/zap/.travis.yml new file mode 100644 index 000000000..ada5ebdcc --- /dev/null +++ b/vendor/go.uber.org/zap/.travis.yml @@ -0,0 +1,21 @@ +language: go +sudo: false +go: + - 1.11.x + - 1.12.x +go_import_path: go.uber.org/zap +env: + global: + - TEST_TIMEOUT_SCALE=10 +cache: + directories: + - vendor +install: + - make dependencies +script: + - make lint + - make test + - make bench +after_success: + - make cover + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 000000000..28d10677e --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,327 @@ +# Changelog + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. 
+ +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +## v1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +## v1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +## v1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. + +## v1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +## v1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +## v1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +## v1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +## v1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. 
+ +## v1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +## v1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +## v1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +## v1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. + +## v1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. 
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +## v1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. 
+* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. + +## v1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +## v1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. 
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## v0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: 
https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 +[#402]: https://github.com/uber-go/zap/pull/402 +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 +[#487]: https://github.com/uber-go/zap/pull/487 +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 +[#504]: https://github.com/uber-go/zap/pull/504 +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 +[#614]: https://github.com/uber-go/zap/pull/614 +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..e327d9aa5 --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor 
Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 000000000..9454bbaf0 --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,81 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. 
In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +``` +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Install zap's dependencies: + +``` +make dependencies +``` + +Make sure that the tests and the linters pass: + +``` +make test +make lint +``` + +If you're not using the minor version of Go specified in the Makefile's +`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is +fine, but it means that you'll only discover lint failures after you open your +pull request. + +## Making Changes + +Start by creating a new branch for your changes: + +``` +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +``` +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We *try* to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +* Add tests for new functionality. +* Write a [good commit message][commit-message]. +* Maintain backward compatibility. 
+ +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 000000000..4256d35c7 --- /dev/null +++ b/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,155 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? + +Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and +`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points +out][go-proverbs], "The bigger the interface, the weaker the abstraction." +Interfaces are also rigid — *any* change requires releasing a new major +version, since it breaks all third-party implementations. + +Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much +abstraction, and it lets us add methods without introducing breaking changes. +Your applications should define and depend upon an interface that includes +just the methods you use. + +### Why sample application logs? + +Applications often experience runs of errors, either because of a bug or +because of a misbehaving user. 
Logging errors is usually a good idea, but it +can easily make this bad situation worse: not only is your application coping +with a flood of errors, it's also spending extra CPU cycles and I/O logging +those errors. Since writes are typically serialized, logging limits throughput +when you need it most. + +Sampling fixes this problem by dropping repetitive log entries. Under normal +conditions, your application writes out every entry. When similar entries are +logged hundreds or thousands of times each second, though, zap begins dropping +duplicates to preserve throughput. + +### Why do the structured logging APIs take a message in addition to fields? + +Subjectively, we find it helpful to accompany structured context with a brief +description. This isn't critical during development, but it makes debugging +and operating unfamiliar systems much easier. + +More concretely, zap's sampling algorithm uses the message to identify +duplicate entries. In our experience, this is a practical middle ground +between random sampling (which often drops the exact entry that you need while +debugging) and hashing the complete entry (which is prohibitively expensive). + +### Why include package-global loggers? + +Since so many other logging packages include a global logger, many +applications aren't designed to accept loggers as explicit parameters. +Changing function signatures is often a breaking change, so zap includes +global loggers to simplify migration. + +Avoid them where possible. + +### Why include dedicated Panic and Fatal log levels? + +In general, application code should handle errors gracefully instead of using +`panic` or `os.Exit`. However, every rule has exceptions, and it's common to +crash when an error is truly unrecoverable. To avoid losing any information +— especially the reason for the crash — the logger must flush any +buffered entries before the process exits. 
+ +Zap makes this easy by offering `Panic` and `Fatal` logging methods that +automatically flush before exiting. Of course, this doesn't guarantee that +logs will never be lost, but it eliminates a common error. + +See the discussion in uber-go/zap#207 for more details. + +### What's `DPanic`? + +`DPanic` stands for "panic in development." In development, it logs at +`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to +catch errors that are theoretically possible, but shouldn't actually happen, +*without* crashing in production. + +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. 
+w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt new file mode 100644 index 000000000..6652bed45 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 000000000..073e9aa91 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,76 @@ +export GO15VENDOREXPERIMENT=1 + +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem +PKGS ?= $(shell glide novendor) +# Many Go tools take file globs or directories as arguments instead of packages. +PKG_FILES ?= *.go zapcore benchmarks buffer zapgrpc zaptest zaptest/observer internal/bufferpool internal/exit internal/color internal/ztest + +# The linting tools evolve with each Go version, so run them only on the latest +# stable release. +GO_VERSION := $(shell go version | cut -d " " -f 3) +GO_MINOR_VERSION := $(word 2,$(subst ., ,$(GO_VERSION))) +LINTABLE_MINOR_VERSIONS := 12 +ifneq ($(filter $(LINTABLE_MINOR_VERSIONS),$(GO_MINOR_VERSION)),) +SHOULD_LINT := true +endif + + +.PHONY: all +all: lint test + +.PHONY: dependencies +dependencies: + @echo "Installing Glide and locked dependencies..." + glide --version || go get -u -f github.com/Masterminds/glide + glide install + @echo "Installing test dependencies..." + go install ./vendor/github.com/axw/gocov/gocov + go install ./vendor/github.com/mattn/goveralls +ifdef SHOULD_LINT + @echo "Installing golint..." 
+ go install ./vendor/github.com/golang/lint/golint +else + @echo "Not installing golint, since we don't expect to lint on" $(GO_VERSION) +endif + +# Disable printf-like invocation checking due to testify.assert.Error() +VET_RULES := -printf=false + +.PHONY: lint +lint: +ifdef SHOULD_LINT + @rm -rf lint.log + @echo "Checking formatting..." + @gofmt -d -s $(PKG_FILES) 2>&1 | tee lint.log + @echo "Installing test dependencies for vet..." + @go test -i $(PKGS) + @echo "Checking vet..." + @go vet $(VET_RULES) $(PKGS) 2>&1 | tee -a lint.log + @echo "Checking lint..." + @$(foreach dir,$(PKGS),golint $(dir) 2>&1 | tee -a lint.log;) + @echo "Checking for unresolved FIXMEs..." + @git grep -i fixme | grep -v -e vendor -e Makefile | tee -a lint.log + @echo "Checking for license headers..." + @./check_license.sh | tee -a lint.log + @[ ! -s lint.log ] +else + @echo "Skipping linters on" $(GO_VERSION) +endif + +.PHONY: test +test: + go test -race $(PKGS) + +.PHONY: cover +cover: + ./scripts/cover.sh $(PKGS) + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach pkg,$(PKGS),go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) $(pkg);) + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 000000000..f4fd1cb44 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,136 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +Blazing fast, structured, leveled logging in Go. + +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. 
+ +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 3131 ns/op | 5 allocs/op | +| :zap: zap (sugared) | 4173 ns/op | 21 allocs/op | +| zerolog | 16154 ns/op | 90 allocs/op | +| lion | 16341 ns/op | 111 allocs/op | +| go-kit | 17049 ns/op | 126 allocs/op | +| logrus | 23662 ns/op | 142 allocs/op | +| log15 | 36351 ns/op | 149 allocs/op | +| apex/log | 42530 ns/op | 126 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 380 ns/op | 0 allocs/op | +| :zap: zap (sugared) | 564 ns/op | 2 allocs/op | +| zerolog | 321 ns/op | 0 allocs/op | +| lion | 7092 ns/op | 39 allocs/op | +| go-kit | 20226 ns/op | 115 allocs/op | +| logrus | 22312 ns/op | 130 allocs/op | +| log15 | 28788 ns/op | 79 allocs/op | +| apex/log | 42063 ns/op | 115 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Objects Allocated | +| :--- | :---: | :---: | +| :zap: zap | 361 ns/op | 0 allocs/op | +| :zap: zap (sugared) | 534 ns/op | 2 allocs/op | +| zerolog | 323 ns/op | 0 allocs/op | +| standard library | 575 ns/op | 2 allocs/op | +| go-kit | 922 ns/op | 13 allocs/op | +| lion | 1413 ns/op | 10 allocs/op | +| logrus | 2291 ns/op | 27 allocs/op | +| apex/log | 3690 ns/op | 11 allocs/op | +| log15 | 5954 ns/op | 26 allocs/op | + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). 
The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE.txt). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in zap's [glide.lock][] file. [↩](#anchor-versions) + +[doc-img]: https://godoc.org/go.uber.org/zap?status.svg +[doc]: https://godoc.org/go.uber.org/zap +[ci-img]: https://travis-ci.org/uber-go/zap.svg?branch=master +[ci]: https://travis-ci.org/uber-go/zap +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 000000000..5be3704a3 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,320 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. +func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. 
+func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Strings constructs a field that carries a slice of strings. +func Strings(key string, ss []string) Field { + return Array(key, stringArray(ss)) +} + +// Times constructs a field that carries a slice of time.Times. +func Times(key string, ts []time.Time) Field { + return Array(key, times(ts)) +} + +// Uints constructs a field that carries a slice of unsigned integers. +func Uints(key string, nums []uint) Field { + return Array(key, uints(nums)) +} + +// Uint64s constructs a field that carries a slice of unsigned integers. +func Uint64s(key string, nums []uint64) Field { + return Array(key, uint64s(nums)) +} + +// Uint32s constructs a field that carries a slice of unsigned integers. +func Uint32s(key string, nums []uint32) Field { + return Array(key, uint32s(nums)) +} + +// Uint16s constructs a field that carries a slice of unsigned integers. +func Uint16s(key string, nums []uint16) Field { + return Array(key, uint16s(nums)) +} + +// Uint8s constructs a field that carries a slice of unsigned integers. +func Uint8s(key string, nums []uint8) Field { + return Array(key, uint8s(nums)) +} + +// Uintptrs constructs a field that carries a slice of pointer addresses. 
+func Uintptrs(key string, us []uintptr) Field { + return Array(key, uintptrs(us)) +} + +// Errors constructs a field that carries a slice of errors. +func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func 
(nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 000000000..7592e8c63 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,115 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import "strconv" + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). 
+func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. +func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 000000000..8fb3e202c --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,49 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import "sync" + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *sync.Pool +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{p: &sync.Pool{ + New: func() interface{} { + return &Buffer{bs: make([]byte, 0, _size)} + }, + }} +} + +// Get retrieves a Buffer from the pool, creating one if necessary. 
+func (p Pool) Get() *Buffer { + buf := p.p.Get().(*Buffer) + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/check_license.sh b/vendor/go.uber.org/zap/check_license.sh new file mode 100644 index 000000000..345ac8b89 --- /dev/null +++ b/vendor/go.uber.org/zap/check_license.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 000000000..6fe17d9e0 --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,243 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// Values configured here are per-second. See zapcore.NewSampler for details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. 
+ Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. 
+func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig is a reasonable production logging configuration. +// Logging is enabled at InfoLevel and above. +// +// It uses a JSON encoder, writes to standard error, and enables sampling. +// Stacktraces are automatically included on logs of ErrorLevel and above. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig is a reasonable development logging configuration. +// Logging is enabled at DebugLevel and above. +// +// It enables development mode (which makes DPanicLevel logs panic), uses a +// console encoder, writes to standard error, and disables sampling. +// Stacktraces are automatically included on logs of WarnLevel and above. 
+func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if cfg.Sampling != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewSampler(core, time.Second, int(cfg.Sampling.Initial), int(cfg.Sampling.Thereafter)) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) 
+ if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 000000000..8638dd1b9 --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. 
It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. 
+// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. 
+package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 000000000..2e9d3c341 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,75 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 000000000..65982a51e --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,80 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync" + + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. 
+func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get().(*errArrayElem) + elem.error = errs[i] + arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 000000000..5130e1347 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,310 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/zapcore" +) + +// Field is an alias for Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). 
+func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Int constructs a field with the given key and value. +func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// String constructs a field with the given key and value. 
+func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint32 constructs a field with the given key and value. +func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. 
+func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, takeStacktrace()) +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. 
+func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. +func Any(key string, value interface{}) Field { + switch val := value.(type) { + case zapcore.ObjectMarshaler: + return Object(key, val) + case zapcore.ArrayMarshaler: + return Array(key, val) + case bool: + return Bool(key, val) + case []bool: + return Bools(key, val) + case complex128: + return Complex128(key, val) + case []complex128: + return Complex128s(key, val) + case complex64: + return Complex64(key, val) + case []complex64: + return Complex64s(key, val) + case float64: + return Float64(key, val) + case []float64: + return Float64s(key, val) + case float32: + return Float32(key, val) + case []float32: + return Float32s(key, val) + case int: + return Int(key, val) + case []int: + return Ints(key, val) + case int64: + return Int64(key, val) + case []int64: + return Int64s(key, val) + case int32: + return Int32(key, val) + case []int32: + return Int32s(key, val) + case int16: + return Int16(key, val) + case []int16: + return Int16s(key, val) + case int8: + return Int8(key, val) + case []int8: + return Int8s(key, val) + case string: + return String(key, val) + case []string: + return Strings(key, val) + case uint: + return Uint(key, val) + case []uint: + return Uints(key, val) + case uint64: + return Uint64(key, val) + case []uint64: + return Uint64s(key, val) + case uint32: + return Uint32(key, val) + case []uint32: + return Uint32s(key, val) + case uint16: + return Uint16(key, val) + case []uint16: + return 
Uint16s(key, val) + case uint8: + return Uint8(key, val) + case []byte: + return Binary(key, val) + case uintptr: + return Uintptr(key, val) + case []uintptr: + return Uintptrs(key, val) + case time.Time: + return Time(key, val) + case []time.Time: + return Times(key, val) + case time.Duration: + return Duration(key, val) + case []time.Duration: + return Durations(key, val) + case error: + return NamedError(key, val) + case []error: + return Errors(key, val) + case fmt.Stringer: + return Stringer(key, val) + default: + return Reflect(key, val) + } +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 000000000..131287507 --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. +func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.lock b/vendor/go.uber.org/zap/glide.lock new file mode 100644 index 000000000..881b462c0 --- /dev/null +++ b/vendor/go.uber.org/zap/glide.lock @@ -0,0 +1,76 @@ +hash: f073ba522c06c88ea3075bde32a8aaf0969a840a66cab6318a0897d141ffee92 +updated: 2017-07-22T18:06:49.598185334-07:00 +imports: +- name: go.uber.org/atomic + version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf +- name: go.uber.org/multierr + version: 3c4937480c32f4c13a875a1829af76c98ca3d40a +testImports: +- name: github.com/apex/log + version: d9b960447bfa720077b2da653cc79e533455b499 + subpackages: + - handlers/json +- name: github.com/axw/gocov + version: 3a69a0d2a4ef1f263e2d92b041a69593d6964fe8 + subpackages: + - gocov +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew +- name: github.com/fatih/color + version: 62e9147c64a1ed519147b62a56a14e83e2be02c1 +- name: github.com/go-kit/kit + version: e10f5bf035be9af21fd5b2fb4469d5716c6ab07d + subpackages: + - log +- name: github.com/go-logfmt/logfmt + version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 +- name: github.com/go-stack/stack + version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b +- name: github.com/golang/lint + version: c5fb716d6688a859aae56d26d3e6070808df29f7 + subpackages: + - golint +- name: github.com/kr/logfmt + version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 +- name: 
github.com/mattn/go-colorable + version: 3fa8c76f9daed4067e4a806fb7e4dc86455c6d6a +- name: github.com/mattn/go-isatty + version: fc9e8d8ef48496124e79ae0df75490096eccf6fe +- name: github.com/mattn/goveralls + version: 6efce81852ad1b7567c17ad71b03aeccc9dd9ae0 +- name: github.com/pborman/uuid + version: e790cca94e6cc75c7064b1332e63811d4aae1a53 +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/rs/zerolog + version: eed4c2b94d945e0b2456ad6aa518a443986b5f22 +- name: github.com/satori/go.uuid + version: 5bf94b69c6b68ee1b541973bb8e1144db23a194b +- name: github.com/sirupsen/logrus + version: 7dd06bf38e1e13df288d471a57d5adbac106be9e +- name: github.com/stretchr/testify + version: f6abca593680b2315d2075e0f5e2a9751e3f431a + subpackages: + - assert + - require +- name: go.pedge.io/lion + version: 87958e8713f1fa138d993087133b97e976642159 +- name: golang.org/x/sys + version: c4489faa6e5ab84c0ef40d6ee878f7a030281f0f + subpackages: + - unix +- name: golang.org/x/tools + version: 496819729719f9d07692195e0a94d6edd2251389 + subpackages: + - cover +- name: gopkg.in/inconshreveable/log15.v2 + version: b105bd37f74e5d9dc7b6ad7806715c7a2b83fd3f + subpackages: + - stack + - term diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 000000000..94412594c --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,35 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: 
gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: go.pedge.io/lion +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: github.com/golang/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 000000000..c1ac0507c --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,168 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! 
Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. +func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns *log.Logger which writes to supplied zap logger at +// required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. 
Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, 
fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go new file mode 100644 index 000000000..6b5dbda80 --- /dev/null +++ b/vendor/go.uber.org/zap/global_go112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build go1.12 + +package zap + +const _stdLogDefaultDepth = 1 diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go new file mode 100644 index 000000000..d3ab9af93 --- /dev/null +++ b/vendor/go.uber.org/zap/global_prego112.go @@ -0,0 +1,26 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// See #682 for more information. +// +build !go1.12 + +package zap + +const _stdLogDefaultDepth = 2 diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 000000000..1b0ecaca9 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "fmt" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// GET requests return a JSON description of the current logging level. PUT +// requests change the logging level and expect a payload like: +// {"level":"info"} +// +// It's perfectly safe to change the logging level while a program is running. 
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level *zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + + case http.MethodGet: + current := lvl.Level() + enc.Encode(payload{Level: ¤t}) + + case http.MethodPut: + var req payload + + if errmess := func() string { + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + return fmt.Sprintf("Request body must be well-formed JSON: %v", err) + } + if req.Level == nil { + return "Must specify a logging level." + } + return "" + }(); errmess != "" { + w.WriteHeader(http.StatusBadRequest) + enc.Encode(errorResponse{Error: errmess}) + return + } + + lvl.SetLevel(*req.Level) + enc.Encode(req) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 000000000..dad583aaa --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffers.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 000000000..c4d5d02ab --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 000000000..dfc5b05fe --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,64 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var real = func() { os.Exit(1) } + +// Exit normally terminates the process by calling os.Exit(1). If the package +// is stubbed, it instead records a call in the testing spy. +func Exit() { + real() +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + prev func() +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: real} + real = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + real = se.prev +} + +func (se *StubbedExit) exit() { + se.Exited = true +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 000000000..3567a9a1e --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,132 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/atomic" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). 
+ FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function. +// +// It's particularly useful when splitting log output between different +// outputs (e.g., standard error and standard out). For sample code, see the +// package-level AdvancedConfiguration example. +type LevelEnablerFunc func(zapcore.Level) bool + +// Enabled calls the wrapped function. +func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) } + +// An AtomicLevel is an atomically changeable, dynamic logging level. It lets +// you safely change the log level of a tree of loggers (the root logger and +// any children created by adding context) at runtime. +// +// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to +// alter its level. +// +// AtomicLevels must be created with the NewAtomicLevel constructor to allocate +// their internal atomic pointer. +type AtomicLevel struct { + l *atomic.Int32 +} + +// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging +// enabled. +func NewAtomicLevel() AtomicLevel { + return AtomicLevel{ + l: atomic.NewInt32(int32(InfoLevel)), + } +} + +// NewAtomicLevelAt is a convenience function that creates an AtomicLevel +// and then calls SetLevel with the given level. +func NewAtomicLevelAt(l zapcore.Level) AtomicLevel { + a := NewAtomicLevel() + a.SetLevel(l) + return a +} + +// Enabled implements the zapcore.LevelEnabler interface, which allows the +// AtomicLevel to be used in place of traditional static levels. +func (lvl AtomicLevel) Enabled(l zapcore.Level) bool { + return lvl.Level().Enabled(l) +} + +// Level returns the minimum enabled log level. +func (lvl AtomicLevel) Level() zapcore.Level { + return zapcore.Level(int8(lvl.l.Load())) +} + +// SetLevel alters the logging level. 
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) { + lvl.l.Store(int32(l)) +} + +// String returns the string representation of the underlying Level. +func (lvl AtomicLevel) String() string { + return lvl.Level().String() +} + +// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text +// representations as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl *AtomicLevel) UnmarshalText(text []byte) error { + if lvl.l == nil { + lvl.l = &atomic.Int32{} + } + + var l zapcore.Level + if err := l.UnmarshalText(text); err != nil { + return err + } + + lvl.SetLevel(l) + return nil +} + +// MarshalText marshals the AtomicLevel to a byte slice. It uses the same +// text representation as the static zapcore.Levels ("debug", "info", "warn", +// "error", "dpanic", "panic", and "fatal"). +func (lvl AtomicLevel) MarshalText() (text []byte, err error) { + return lvl.Level().MarshalText() +} diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go new file mode 100644 index 000000000..dc8f6e3a4 --- /dev/null +++ b/vendor/go.uber.org/zap/logger.go @@ -0,0 +1,305 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + name string + errorOutput zapcore.WriteSyncer + + addCaller bool + addStack zapcore.LevelEnabler + + callerSkip int +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. 
It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(ioutil.Discard), + addStack: zapcore.FatalLevel + 1, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) +} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. 
+func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. 
The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Sync calls the underlying Core's Sync method, flushing any buffered log +// entries. Applications should take care to call Sync before exiting. 
+func (log *Logger) Sync() error { + return log.core.Sync() +} + +// Core returns the Logger's underlying zapcore.Core. +func (log *Logger) Core() zapcore.Core { + return log.core +} + +func (log *Logger) clone() *Logger { + copy := *log + return &copy +} + +func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + // check must always be called directly by a method in the Logger interface + // (e.g., Check, Info, Fatal). + const callerSkipOffset = 2 + + // Create basic checked entry thru the core; this will be non-nil if the + // log message will actually be written somewhere. + ent := zapcore.Entry{ + LoggerName: log.name, + Time: time.Now(), + Level: lvl, + Message: msg, + } + ce := log.core.Check(ent, nil) + willWrite := ce != nil + + // Set up any required terminal behavior. + switch ent.Level { + case zapcore.PanicLevel: + ce = ce.Should(ent, zapcore.WriteThenPanic) + case zapcore.FatalLevel: + ce = ce.Should(ent, zapcore.WriteThenFatal) + case zapcore.DPanicLevel: + if log.development { + ce = ce.Should(ent, zapcore.WriteThenPanic) + } + } + + // Only do further annotation if we're going to write this message; checked + // entries that exist only for terminal behavior don't benefit from + // annotation. + if !willWrite { + return ce + } + + // Thread the error output through to the CheckedEntry. 
+ ce.ErrorOutput = log.errorOutput + if log.addCaller { + ce.Entry.Caller = zapcore.NewEntryCaller(runtime.Caller(log.callerSkip + callerSkipOffset)) + if !ce.Entry.Caller.Defined { + fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", time.Now().UTC()) + log.errorOutput.Sync() + } + } + if log.addStack.Enabled(ce.Entry.Level) { + ce.Entry.Stack = Stack("").String + } + + return ce +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go new file mode 100644 index 000000000..7a6b0fca1 --- /dev/null +++ b/vendor/go.uber.org/zap/options.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "go.uber.org/zap/zapcore" + +// An Option configures a Logger. +type Option interface { + apply(*Logger) +} + +// optionFunc wraps a func so it satisfies the Option interface. 
+type optionFunc func(*Logger) + +func (f optionFunc) apply(log *Logger) { + f(log) +} + +// WrapCore wraps or replaces the Logger's underlying zapcore.Core. +func WrapCore(f func(zapcore.Core) zapcore.Core) Option { + return optionFunc(func(log *Logger) { + log.core = f(log.core) + }) +} + +// Hooks registers functions which will be called each time the Logger writes +// out an Entry. Repeated use of Hooks is additive. +// +// Hooks are useful for simple side effects, like capturing metrics for the +// number of emitted logs. More complex side effects, including anything that +// requires access to the Entry's structured fields, should be implemented as +// a zapcore.Core instead. See zapcore.RegisterHooks for details. +func Hooks(hooks ...func(zapcore.Entry) error) Option { + return optionFunc(func(log *Logger) { + log.core = zapcore.RegisterHooks(log.core, hooks...) + }) +} + +// Fields adds fields to the Logger. +func Fields(fs ...Field) Option { + return optionFunc(func(log *Logger) { + log.core = log.core.With(fs) + }) +} + +// ErrorOutput sets the destination for errors generated by the Logger. Note +// that this option only affects internal errors; for sample code that sends +// error-level logs to a different location from info- and debug-level logs, +// see the package-level AdvancedConfiguration example. +// +// The supplied WriteSyncer must be safe for concurrent use. The Open and +// zapcore.Lock functions are the simplest ways to protect files with a mutex. +func ErrorOutput(w zapcore.WriteSyncer) Option { + return optionFunc(func(log *Logger) { + log.errorOutput = w + }) +} + +// Development puts the logger in development mode, which makes DPanic-level +// logs panic instead of simply logging an error. +func Development() Option { + return optionFunc(func(log *Logger) { + log.development = true + }) +} + +// AddCaller configures the Logger to annotate each message with the filename +// and line number of zap's caller. 
+func AddCaller() Option { + return optionFunc(func(log *Logger) { + log.addCaller = true + }) +} + +// AddCallerSkip increases the number of callers skipped by caller annotation +// (as enabled by the AddCaller option). When building wrappers around the +// Logger and SugaredLogger, supplying this Option prevents zap from always +// reporting the wrapper code as the caller. +func AddCallerSkip(skip int) Option { + return optionFunc(func(log *Logger) { + log.callerSkip += skip + }) +} + +// AddStacktrace configures the Logger to record a stack trace for all messages at +// or above a given level. +func AddStacktrace(lvl zapcore.LevelEnabler) Option { + return optionFunc(func(log *Logger) { + log.addStack = lvl + }) +} diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go new file mode 100644 index 000000000..ff0becfe5 --- /dev/null +++ b/vendor/go.uber.org/zap/sink.go @@ -0,0 +1,161 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "go.uber.org/zap/zapcore" +) + +const schemeFile = "file" + +var ( + _sinkMutex sync.RWMutex + _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme +) + +func init() { + resetSinkRegistry() +} + +func resetSinkRegistry() { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + _sinkFactories = map[string]func(*url.URL) (Sink, error){ + schemeFile: newFileSink, + } +} + +// Sink defines the interface to write to and close logger destinations. +type Sink interface { + zapcore.WriteSyncer + io.Closer +} + +type nopCloserSink struct{ zapcore.WriteSyncer } + +func (nopCloserSink) Close() error { return nil } + +type errSinkNotFound struct { + scheme string +} + +func (e *errSinkNotFound) Error() string { + return fmt.Sprintf("no sink found for scheme %q", e.scheme) +} + +// RegisterSink registers a user-supplied factory for all sinks with a +// particular scheme. +// +// All schemes must be ASCII, valid under section 3.1 of RFC 3986 +// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already +// have a factory registered. Zap automatically registers a factory for the +// "file" scheme. 
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error { + _sinkMutex.Lock() + defer _sinkMutex.Unlock() + + if scheme == "" { + return errors.New("can't register a sink factory for empty string") + } + normalized, err := normalizeScheme(scheme) + if err != nil { + return fmt.Errorf("%q is not a valid scheme: %v", scheme, err) + } + if _, ok := _sinkFactories[normalized]; ok { + return fmt.Errorf("sink factory already registered for scheme %q", normalized) + } + _sinkFactories[normalized] = factory + return nil +} + +func newSink(rawURL string) (Sink, error) { + u, err := url.Parse(rawURL) + if err != nil { + return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err) + } + if u.Scheme == "" { + u.Scheme = schemeFile + } + + _sinkMutex.RLock() + factory, ok := _sinkFactories[u.Scheme] + _sinkMutex.RUnlock() + if !ok { + return nil, &errSinkNotFound{u.Scheme} + } + return factory(u) +} + +func newFileSink(u *url.URL) (Sink, error) { + if u.User != nil { + return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u) + } + if u.Fragment != "" { + return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u) + } + if u.RawQuery != "" { + return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u) + } + // Error messages are better if we check hostname and port separately. 
+ if u.Port() != "" { + return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u) + } + if hn := u.Hostname(); hn != "" && hn != "localhost" { + return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u) + } + switch u.Path { + case "stdout": + return nopCloserSink{os.Stdout}, nil + case "stderr": + return nopCloserSink{os.Stderr}, nil + } + return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) +} + +func normalizeScheme(s string) (string, error) { + // https://tools.ietf.org/html/rfc3986#section-3.1 + s = strings.ToLower(s) + if first := s[0]; 'a' > first || 'z' < first { + return "", errors.New("must start with a letter") + } + for i := 1; i < len(s); i++ { // iterate over bytes, not runes + c := s[i] + switch { + case 'a' <= c && c <= 'z': + continue + case '0' <= c && c <= '9': + continue + case c == '.' || c == '+' || c == '-': + continue + } + return "", fmt.Errorf("may not contain %q", c) + } + return s, nil +} diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go new file mode 100644 index 000000000..100fac216 --- /dev/null +++ b/vendor/go.uber.org/zap/stacktrace.go @@ -0,0 +1,126 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "runtime" + "strings" + "sync" + + "go.uber.org/zap/internal/bufferpool" +) + +const _zapPackage = "go.uber.org/zap" + +var ( + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } + + // We add "." and "/" suffixes to the package name to ensure we only match + // the exact package and not any package with the same prefix. + _zapStacktracePrefixes = addPrefix(_zapPackage, ".", "/") + _zapStacktraceVendorContains = addPrefix("/vendor/", _zapStacktracePrefixes...) +) + +func takeStacktrace() string { + buffer := bufferpool.Get() + defer buffer.Free() + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var numFrames int + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + numFrames = runtime.Callers(2, programCounters.pcs) + if numFrames < len(programCounters.pcs) { + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. + programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + skipZapFrames := true // skip all consecutive zap frames at the beginning. + frames := runtime.CallersFrames(programCounters.pcs[:numFrames]) + + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. 
The last frame is a a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if skipZapFrames && isZapFrame(frame.Function) { + continue + } else { + skipZapFrames = false + } + + if i != 0 { + buffer.AppendByte('\n') + } + i++ + buffer.AppendString(frame.Function) + buffer.AppendByte('\n') + buffer.AppendByte('\t') + buffer.AppendString(frame.File) + buffer.AppendByte(':') + buffer.AppendInt(int64(frame.Line)) + } + + return buffer.String() +} + +func isZapFrame(function string) bool { + for _, prefix := range _zapStacktracePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + + // We can't use a prefix match here since the location of the vendor + // directory affects the prefix. Instead we do a contains match. + for _, contains := range _zapStacktraceVendorContains { + if strings.Contains(function, contains) { + return true + } + } + + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} + +func addPrefix(prefix string, ss ...string) []string { + withPrefix := make([]string, len(ss)) + for i, s := range ss { + withPrefix[i] = prefix + s + } + return withPrefix +} diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go new file mode 100644 index 000000000..77ca227f4 --- /dev/null +++ b/vendor/go.uber.org/zap/sugar.go @@ -0,0 +1,304 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +const ( + _oddNumberErrMsg = "Ignored key without a value." + _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." +) + +// A SugaredLogger wraps the base Logger functionality in a slower, but less +// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar +// method. +// +// Unlike the Logger, the SugaredLogger doesn't insist on structured logging. +// For each log level, it exposes three methods: one for loosely-typed +// structured logging, one for println-style formatting, and one for +// printf-style formatting. For example, SugaredLoggers can produce InfoLevel +// output with Infow ("info with" structured context), Info, or Infof. +type SugaredLogger struct { + base *Logger +} + +// Desugar unwraps a SugaredLogger, exposing the original Logger. 
Desugaring +// is quite inexpensive, so it's reasonable for a single application to use +// both Loggers and SugaredLoggers, converting between them on the boundaries +// of performance-sensitive code. +func (s *SugaredLogger) Desugar() *Logger { + base := s.base.clone() + base.callerSkip -= 2 + return base +} + +// Named adds a sub-scope to the logger's name. See Logger.Named for details. +func (s *SugaredLogger) Named(name string) *SugaredLogger { + return &SugaredLogger{base: s.base.Named(name)} +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. +// +// For example, +// sugaredLogger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} +} + +// Debug uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Debug(args ...interface{}) { + s.log(DebugLevel, "", args, nil) +} + +// Info uses fmt.Sprint to construct and log a message. 
+func (s *SugaredLogger) Info(args ...interface{}) { + s.log(InfoLevel, "", args, nil) +} + +// Warn uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Warn(args ...interface{}) { + s.log(WarnLevel, "", args, nil) +} + +// Error uses fmt.Sprint to construct and log a message. +func (s *SugaredLogger) Error(args ...interface{}) { + s.log(ErrorLevel, "", args, nil) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanic(args ...interface{}) { + s.log(DPanicLevel, "", args, nil) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func (s *SugaredLogger) Panic(args ...interface{}) { + s.log(PanicLevel, "", args, nil) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func (s *SugaredLogger) Fatal(args ...interface{}) { + s.log(FatalLevel, "", args, nil) +} + +// Debugf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Debugf(template string, args ...interface{}) { + s.log(DebugLevel, template, args, nil) +} + +// Infof uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Infof(template string, args ...interface{}) { + s.log(InfoLevel, template, args, nil) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Warnf(template string, args ...interface{}) { + s.log(WarnLevel, template, args, nil) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func (s *SugaredLogger) Errorf(template string, args ...interface{}) { + s.log(ErrorLevel, template, args, nil) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func (s *SugaredLogger) DPanicf(template string, args ...interface{}) { + s.log(DPanicLevel, template, args, nil) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. 
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) { + s.log(PanicLevel, template, args, nil) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { + s.log(FatalLevel, template, args, nil) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) { + s.log(DebugLevel, msg, nil, keysAndValues) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) { + s.log(InfoLevel, msg, nil, keysAndValues) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) { + s.log(WarnLevel, msg, nil, keysAndValues) +} + +// Errorw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) { + s.log(ErrorLevel, msg, nil, keysAndValues) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) { + s.log(DPanicLevel, msg, nil, keysAndValues) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. 
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) { + s.log(PanicLevel, msg, nil, keysAndValues) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { + s.log(FatalLevel, msg, nil, keysAndValues) +} + +// Sync flushes any buffered log entries. +func (s *SugaredLogger) Sync() error { + return s.base.Sync() +} + +func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) { + // If logging at this level is completely disabled, skip the overhead of + // string formatting. + if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) { + return + } + + // Format with Sprint, Sprintf, or neither. + msg := template + if msg == "" && len(fmtArgs) > 0 { + msg = fmt.Sprint(fmtArgs...) + } else if msg != "" && len(fmtArgs) > 0 { + msg = fmt.Sprintf(template, fmtArgs...) + } + + if ce := s.base.Check(lvl, msg); ce != nil { + ce.Write(s.sweetenFields(context)...) + } +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields := make([]Field, 0, len(args)) + var invalid invalidPairs + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.DPanic(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. 
+ key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.DPanic(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 000000000..c5a1f1622 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 000000000..86a709ab0 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,99 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "fmt" + "io" + "io/ioutil" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, close, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) 
+ return writer, close, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + close := func() { + for _, c := range closers { + c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + close() + return writers, nil, openErr + } + + return writers, close, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(ioutil.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 000000000..b7875966f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,147 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +var _sliceEncoderPool = sync.Pool{ + New: func() interface{} { + return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)} + }, +} + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get().(*sliceArrayEncoder) +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. 
+// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined && c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + for i := range arr.elems { + if i > 0 { + line.AppendByte('\t') + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addTabIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. 
+ if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + if c.LineEnding != "" { + line.AppendString(c.LineEnding) + } else { + line.AppendString(DefaultLineEnding) + } + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer context.buf.Free() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addTabIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addTabIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendByte('\t') + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 000000000..a1ef8b034 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. + Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. 
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. Ignore Sync + // errors, pending a clean solution to issue #370. + c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 000000000..31000e91f --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 000000000..f0509522b --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,348 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// A LevelEncoder serializes a Level to a primitive type. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. 
"capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02T15:04:05.000Z0700")) +} + +// UnmarshalText unmarshals text to a TimeEncoder. "iso8601" and "ISO8601" are +// unmarshaled to ISO8601TimeEncoder, "millis" is unmarshaled to +// EpochMillisTimeEncoder, and anything else is unmarshaled to EpochTimeEncoder. 
+func (e *TimeEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "iso8601", "ISO8601": + *e = ISO8601TimeEncoder + case "millis": + *e = EpochMillisTimeEncoder + case "nanos": + *e = EpochNanosTimeEncoder + default: + *e = EpochTimeEncoder + } + return nil +} + +// A DurationEncoder serializes a time.Duration to a primitive type. +type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) + +// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. +func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendFloat64(float64(d) / float64(time.Second)) +} + +// NanosDurationEncoder serializes a time.Duration to an integer number of +// nanoseconds elapsed. +func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendInt64(int64(d)) +} + +// StringDurationEncoder serializes a time.Duration using its built-in String +// method. +func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) { + enc.AppendString(d.String()) +} + +// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled +// to StringDurationEncoder, and anything else is unmarshaled to +// NanosDurationEncoder. +func (e *DurationEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "string": + *e = StringDurationEncoder + case "nanos": + *e = NanosDurationEncoder + default: + *e = SecondsDurationEncoder + } + return nil +} + +// A CallerEncoder serializes an EntryCaller to a primitive type. +type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) + +// FullCallerEncoder serializes a caller in /full/path/to/package/file:line +// format. +func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. 
+ enc.AppendString(caller.String()) +} + +// ShortCallerEncoder serializes a caller in package/file:line format, trimming +// all but the final directory from the full path. +func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. 
For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it's slow + // and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. 
+ AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 000000000..7d9893f33 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,257 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "strings" + "sync" + "time" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + + "go.uber.org/multierr" +) + +var ( + _cePool = sync.Pool{New: func() interface{} { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } + }} +) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get().(*CheckedEntry) + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. 
+type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. + // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. 
+// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes a fatal os.Exit after Write. + WriteThenFatal +) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or Should on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. +type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + should CheckWriteAction + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.should = WriteThenNoop + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. 
+ // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", time.Now(), ce.Entry) + ce.ErrorOutput.Sync() + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if ce.ErrorOutput != nil { + if err != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", time.Now(), err) + ce.ErrorOutput.Sync() + } + } + + should, msg := ce.should, ce.Message + putCheckedEntry(ce) + + switch should { + case WriteThenPanic: + panic(msg) + case WriteThenFatal: + exit.Exit() + } +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.should = should + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 000000000..a67c7bacc --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,120 @@ +// Copyright (c) 2017 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "sync" +) + +// Encodes the given error into fields of an object. A field with the given +// name is added for the error message. +// +// If the error implements fmt.Formatter, a field with the name ${key}Verbose +// is also added with the full verbose error message. +// +// Finally, if the error implements errorGroup (from go.uber.org/multierr) or +// causer (from github.com/pkg/errors), a ${key}Causes field is added with an +// array of objects containing the errors this error was comprised of. +// +// { +// "error": err.Error(), +// "errorVerbose": fmt.Sprintf("%+v", err), +// "errorCauses": [ +// ... 
+// ], +// } +func encodeError(key string, err error, enc ObjectEncoder) error { + basic := err.Error() + enc.AddString(key, basic) + + switch e := err.(type) { + case errorGroup: + return enc.AddArray(key+"Causes", errArray(e.Errors())) + case fmt.Formatter: + verbose := fmt.Sprintf("%+v", e) + if verbose != basic { + // This is a rich error type, like those produced by + // github.com/pkg/errors. + enc.AddString(key+"Verbose", verbose) + } + } + return nil +} + +type errorGroup interface { + // Provides read-only access to the underlying list of errors, preferably + // without causing any allocs. + Errors() []error +} + +type causer interface { + // Provides access to the error that caused this error. + Cause() error +} + +// Note that errArry and errArrayElem are very similar to the version +// implemented in the top-level error.go file. We can't re-use this because +// that would require exporting errArray as part of the zapcore API. + +// Encodes a list of errors using the standard error encoding logic. +type errArray []error + +func (errs errArray) MarshalLogArray(arr ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + + el := newErrArrayElem(errs[i]) + arr.AppendObject(el) + el.Free() + } + return nil +} + +var _errArrayElemPool = sync.Pool{New: func() interface{} { + return &errArrayElem{} +}} + +// Encodes any error into a {"error": ...} re-using the same errors logic. +// +// May be passed in place of an array to build a single-element array. 
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get().(*errArrayElem) + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 000000000..ae772e4a1 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,212 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex128. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time. + TimeType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. 
+ UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case 
Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. + enc.AddTime(f.Key, time.Unix(0, f.Integer)) + } + case Uint64Type: + enc.AddUint64(f.Key, uint64(f.Integer)) + case Uint32Type: + enc.AddUint32(f.Key, uint32(f.Integer)) + case Uint16Type: + enc.AddUint16(f.Key, uint16(f.Integer)) + case Uint8Type: + enc.AddUint8(f.Key, uint8(f.Integer)) + case UintptrType: + enc.AddUintptr(f.Key, uintptr(f.Integer)) + case ReflectType: + err = enc.AddReflected(f.Key, f.Interface) + case NamespaceType: + enc.OpenNamespace(f.Key) + case StringerType: + err = encodeStringer(f.Key, f.Interface, enc) + case ErrorType: + encodeError(f.Key, f.Interface.(error), enc) + case SkipType: + break + default: + panic(fmt.Sprintf("unknown field type: %v", f)) + } + + if err != nil { + enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error()) + } +} + +// Equals returns whether two fields are equal. For non-primitive types such as +// errors, marshalers, or reflect types, it uses reflect.DeepEqual. 
+func (f Field) Equals(other Field) bool { + if f.Type != other.Type { + return false + } + if f.Key != other.Key { + return false + } + + switch f.Type { + case BinaryType, ByteStringType: + return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte)) + case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType: + return reflect.DeepEqual(f.Interface, other.Interface) + default: + return f == other + } +} + +func addFields(enc ObjectEncoder, fields []Field) { + for i := range fields { + fields[i].AddTo(enc) + } +} + +func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (err error) { + defer func() { + if v := recover(); v != nil { + err = fmt.Errorf("PANIC=%v", v) + } + }() + + enc.AddString(key, stringer.(fmt.Stringer).String()) + return +} diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go new file mode 100644 index 000000000..5db4afb30 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/hook.go @@ -0,0 +1,68 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 000000000..9aec4eada --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,505 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "encoding/json" + "math" + "sync" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. 
+const _hex = "0123456789abcdef" + +var _jsonPool = sync.Pool{New: func() interface{} { + return &jsonEncoder{} +}} + +func getJSONEncoder() *jsonEncoder { + return _jsonPool.Get().(*jsonEncoder) +} + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc *json.Encoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. +// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// {"foo":"bar","foo":"baz"} +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. 
+func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = json.NewEncoder(enc.reflectBuf) + + // For consistency with our custom JSON encoder. 
+ enc.reflectEnc.SetEscapeHTML(false) + } else { + enc.reflectBuf.Reset() + } +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(obj) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addKey(key) + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendComplex128(val complex128) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. 
+ enc.buf.AppendFloat(r, 64) + enc.buf.AppendByte('+') + enc.buf.AppendFloat(i, 64) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + enc.EncodeDuration(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. + enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + enc.resetReflectBuf() + err := enc.reflectEnc.Encode(val) + if err != nil { + return err + } + enc.reflectBuf.TrimNewline() + enc.addElementSeparator() + _, err = enc.buf.Write(enc.reflectBuf.Bytes()) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + enc.EncodeTime(val, enc) + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. 
+ enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) } +func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) } +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) 
AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := getJSONEncoder() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" { + final.addKey(final.LevelKey) + cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined && final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. 
+ final.AppendString(ent.Caller.String()) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + if final.LineEnding != "" { + final.buf.AppendString(final.LineEnding) + } else { + final.buf.AppendString(DefaultLineEnding) + } + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. 
+func (enc *jsonEncoder) safeAddString(s string) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRuneInString(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.AppendString(s[i : i+size]) + i += size + } +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + for i := 0; i < len(s); { + if enc.tryAddRuneSelf(s[i]) { + i++ + continue + } + r, size := utf8.DecodeRune(s[i:]) + if enc.tryAddRuneError(r, size) { + i++ + continue + } + enc.buf.Write(s[i : i+size]) + i += size + } +} + +// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte. +func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool { + if b >= utf8.RuneSelf { + return false + } + if 0x20 <= b && b != '\\' && b != '"' { + enc.buf.AppendByte(b) + return true + } + switch b { + case '\\', '"': + enc.buf.AppendByte('\\') + enc.buf.AppendByte(b) + case '\n': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('n') + case '\r': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('r') + case '\t': + enc.buf.AppendByte('\\') + enc.buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + enc.buf.AppendString(`\u00`) + enc.buf.AppendByte(_hex[b>>4]) + enc.buf.AppendByte(_hex[b&0xF]) + } + return true +} + +func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool { + if r == utf8.RuneError && size == 1 { + enc.buf.AppendString(`\ufffd`) + return true + } + return false +} diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go new file mode 100644 index 000000000..e575c9f43 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level.go @@ -0,0 +1,175 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "errors" + "fmt" +) + +var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level") + +// A Level is a logging priority. Higher levels are more important. +type Level int8 + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel Level = iota - 1 + // InfoLevel is the default logging priority. + InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel + // PanicLevel logs a message, then panics. 
+ PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel + + _minLevel = DebugLevel + _maxLevel = FatalLevel +) + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. 
+func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. +func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 000000000..7af8dadcb --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 000000000..2627a653d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 000000000..dfead0829 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. 
+type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = 
append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 000000000..e31641863 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,134 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zapcore + +import ( + "time" + + "go.uber.org/atomic" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Inc() + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CAS(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Inc() + } + + return 1 +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 +} + +// NewSampler creates a Core that samples incoming entries, which caps the CPU +// and I/O load of logging while attempting to preserve a representative subset +// of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. 
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + } +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (n-s.first)%s.thereafter != 0 { + return ce + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 000000000..07a32eef9 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,81 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 000000000..209e25fe2 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,123 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. 
+func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + // Copy to protect against https://github.com/golang/go/issues/7809 + return multiWriteSyncer(append([]WriteSyncer(nil), ws...)) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 000000000..b423feaea --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// +// Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// +// Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. 
The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. 
The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + 
binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, 
lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 000000000..10f46948d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 000000000..2fc1ec031 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,60 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 000000000..74a6e7332 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,243 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; 
\ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 
t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 000000000..a481b2243 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 
+ 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = 
v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 000000000..baf7b551d --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 000000000..c160e1a4e --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,289 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). 
+// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. +func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. 
+func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c 
[2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + 
hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 000000000..4d31dd0fd --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 000000000..5593b1b3d --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,750 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.7,amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; 
BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE 
$0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + +// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + 
VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() 
\ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + MOVQ SP, R9 + 
ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ CX, 16(SP) + XORQ CX, CX + MOVQ CX, 24(SP) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(SP) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(SP) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(SP) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(SP), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(SP) + VMOVDQA Y13, 64(SP) + VMOVDQA Y14, 96(SP) + VMOVDQA Y15, 128(SP) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(SP) + VMOVDQA Y13, 192(SP) + VMOVDQA Y14, 224(SP) + VMOVDQA Y15, 256(SP) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5) + ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 
0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + MOVQ DX, SP + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ 
+ VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + 
VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define 
LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(SP), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(SP) + VMOVDQA X13, 32(SP) + VMOVDQA X14, 48(SP) + VMOVDQA X15, 64(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(SP) + VMOVDQA X13, 96(SP) + VMOVDQA X14, 112(SP) + VMOVDQA X15, 128(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(SP) + VMOVDQA X13, 
160(SP) + VMOVDQA X14, 176(SP) + VMOVDQA X15, 192(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(SP) + VMOVDQA X13, 224(SP) + VMOVDQA X14, 240(SP) + VMOVDQA X15, 256(SP) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, 
X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 000000000..30e2fcd58 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.7,amd64,!gccgo,!appengine + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 000000000..578e947b3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,281 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, 
v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, BP + MOVQ SP, R9 + ADDQ $15, R9 + ANDQ $~15, R9 + MOVQ R9, SP + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(SP) + XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ 
$128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(SP), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(SP) + MOVO X9, 32(SP) + MOVO X10, 48(SP) + MOVO X11, 64(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(SP) + MOVO X9, 96(SP) + MOVO X10, 112(SP) + MOVO X11, 128(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(SP) + MOVO X9, 160(SP) + MOVO X10, 176(SP) + MOVO X11, 192(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(SP) + MOVO X9, 224(SP) + MOVO X10, 240(SP) + MOVO X11, 256(SP) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + 
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, 
X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + MOVQ BP, SP + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 000000000..3168a8aa3 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = 
bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 000000000..da156a1ba --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 000000000..52c414db0 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produce a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. 
+// +// A non-nil key turns the hash into a MAC. The key must between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length. + return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + 
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 000000000..efd689af4 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,32 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 000000000..7f096fef0 --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. 
On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. 
+ break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2ecf9eee7..a56f26b36 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -70,6 +70,10 @@ github.com/aws/aws-sdk-go/service/sts/stsiface github.com/beorn7/perks/quantile # github.com/billziss-gh/cgofuse v1.2.0 github.com/billziss-gh/cgofuse/fuse +# github.com/btcsuite/btcutil v1.0.1 +github.com/btcsuite/btcutil/base58 +# github.com/calebcase/tmpfile v1.0.1 +github.com/calebcase/tmpfile # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 # github.com/coreos/go-semver v0.3.0 @@ -94,6 +98,8 @@ github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_common github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team_policies github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users_common +# github.com/gogo/protobuf v1.2.1 +github.com/gogo/protobuf/proto # github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.3 @@ -140,6 +146,8 @@ github.com/mattn/go-isatty github.com/mattn/go-runewidth # github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5 +github.com/minio/sha256-simd # github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/go-homedir # github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 @@ -192,6 +200,11 @@ github.com/shurcooL/sanitized_anchor_name github.com/sirupsen/logrus # github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 github.com/skratchdot/open-golang/open +# github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 +github.com/spacemonkeygo/errors +# github.com/spacemonkeygo/monkit/v3 v3.0.6 +github.com/spacemonkeygo/monkit/v3 +github.com/spacemonkeygo/monkit/v3/monotime # 
github.com/spf13/cobra v1.0.0 github.com/spf13/cobra github.com/spf13/cobra/doc @@ -202,6 +215,8 @@ github.com/stretchr/testify/assert github.com/stretchr/testify/require # github.com/t3rm1n4l/go-mega v0.0.0-20200117211730-79a813bb328d github.com/t3rm1n4l/go-mega +# github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b +github.com/vivint/infectious # github.com/xanzy/ssh-agent v0.2.1 github.com/xanzy/ssh-agent # github.com/youmark/pkcs8 v0.0.0-20191102193632-94c173a94d60 @@ -218,6 +233,8 @@ github.com/yunify/qingstor-sdk-go/v3/request/signer github.com/yunify/qingstor-sdk-go/v3/request/unpacker github.com/yunify/qingstor-sdk-go/v3/service github.com/yunify/qingstor-sdk-go/v3/utils +# github.com/zeebo/errs v1.2.2 +github.com/zeebo/errs # go.etcd.io/bbolt v1.3.3 go.etcd.io/bbolt # go.opencensus.io v0.22.3 @@ -237,10 +254,23 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.uber.org/atomic v1.4.0 +go.uber.org/atomic +# go.uber.org/multierr v1.1.0 +go.uber.org/multierr +# go.uber.org/zap v1.10.0 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/zapcore # goftp.io/server v0.3.2 goftp.io/server # golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d +golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt +golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/curve25519 @@ -284,6 +314,7 @@ golang.org/x/oauth2/jws golang.org/x/oauth2/jwt # golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e golang.org/x/sync/errgroup +golang.org/x/sync/semaphore # golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae golang.org/x/sys/cpu golang.org/x/sys/unix @@ -362,3 +393,54 @@ google.golang.org/grpc/status google.golang.org/grpc/tap # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 +# storj.io/common v0.0.0-20200429074521-4ba140e4b747 
+storj.io/common/encryption +storj.io/common/errs2 +storj.io/common/fpath +storj.io/common/identity +storj.io/common/internal/grpchook +storj.io/common/macaroon +storj.io/common/memory +storj.io/common/netutil +storj.io/common/paths +storj.io/common/pb +storj.io/common/peertls +storj.io/common/peertls/extensions +storj.io/common/peertls/tlsopts +storj.io/common/pkcrypto +storj.io/common/ranger +storj.io/common/readcloser +storj.io/common/rpc +storj.io/common/rpc/rpcpeer +storj.io/common/rpc/rpcpool +storj.io/common/rpc/rpcstatus +storj.io/common/rpc/rpctracing +storj.io/common/signing +storj.io/common/storj +storj.io/common/sync2 +storj.io/common/uuid +# storj.io/drpc v0.0.11 +storj.io/drpc +storj.io/drpc/drpcconn +storj.io/drpc/drpcctx +storj.io/drpc/drpcdebug +storj.io/drpc/drpcerr +storj.io/drpc/drpcmanager +storj.io/drpc/drpcmetadata +storj.io/drpc/drpcmetadata/invoke +storj.io/drpc/drpcmux +storj.io/drpc/drpcsignal +storj.io/drpc/drpcstream +storj.io/drpc/drpcwire +# storj.io/uplink v1.0.5 +storj.io/uplink +storj.io/uplink/internal/expose +storj.io/uplink/internal/telemetryclient +storj.io/uplink/private/ecclient +storj.io/uplink/private/eestream +storj.io/uplink/private/metainfo +storj.io/uplink/private/metainfo/kvmetainfo +storj.io/uplink/private/piecestore +storj.io/uplink/private/storage/segments +storj.io/uplink/private/storage/streams +storj.io/uplink/private/stream diff --git a/vendor/storj.io/common/LICENSE b/vendor/storj.io/common/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/storj.io/common/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/storj.io/common/encryption/aesgcm.go b/vendor/storj.io/common/encryption/aesgcm.go new file mode 100644 index 000000000..877cc3ee9 --- /dev/null +++ b/vendor/storj.io/common/encryption/aesgcm.go @@ -0,0 +1,165 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "crypto/aes" + "crypto/cipher" + + "storj.io/common/storj" +) + +type aesgcmEncrypter struct { + blockSize int + key *storj.Key + startingNonce *AESGCMNonce + overhead int + aesgcm cipher.AEAD +} + +// NewAESGCMEncrypter returns a Transformer that encrypts the data passing +// through with key. 
+// +// startingNonce is treated as a big-endian encoded unsigned +// integer, and as blocks pass through, their block number and the starting +// nonce is added together to come up with that block's nonce. Encrypting +// different data with the same key and the same nonce is a huge security +// issue. It's safe to always encode new data with a random key and random +// startingNonce. The monotonically-increasing nonce (that rolls over) is to +// protect against data reordering. +// +// When in doubt, generate a new key from crypto/rand and a startingNonce +// from crypto/rand as often as possible. +func NewAESGCMEncrypter(key *storj.Key, startingNonce *AESGCMNonce, encryptedBlockSize int) (Transformer, error) { + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, Error.Wrap(err) + } + aesgcmEncrypt, err := cipher.NewGCM(block) + if err != nil { + return nil, Error.Wrap(err) + } + if encryptedBlockSize <= aesgcmEncrypt.Overhead() { + return nil, ErrInvalidConfig.New("encrypted block size %d too small", encryptedBlockSize) + } + return &aesgcmEncrypter{ + blockSize: encryptedBlockSize - aesgcmEncrypt.Overhead(), + key: key, + startingNonce: startingNonce, + overhead: aesgcmEncrypt.Overhead(), + aesgcm: aesgcmEncrypt, + }, nil +} + +func (s *aesgcmEncrypter) InBlockSize() int { + return s.blockSize +} + +func (s *aesgcmEncrypter) OutBlockSize() int { + return s.blockSize + s.overhead +} + +func calcGCMNonce(startingNonce *AESGCMNonce, blockNum int64) (rv [12]byte, err error) { + if copy(rv[:], (*startingNonce)[:]) != len(rv) { + return rv, Error.New("didn't copy memory?!") + } + _, err = incrementBytes(rv[:], blockNum) + return rv, err +} + +func (s *aesgcmEncrypter) Transform(out, in []byte, blockNum int64) ([]byte, error) { + nonce, err := calcGCMNonce(s.startingNonce, blockNum) + if err != nil { + return nil, err + } + + cipherData := s.aesgcm.Seal(out, nonce[:], in, nil) + return cipherData, nil +} + +type aesgcmDecrypter struct { + blockSize int 
+ key *storj.Key + startingNonce *AESGCMNonce + overhead int + aesgcm cipher.AEAD +} + +// NewAESGCMDecrypter returns a Transformer that decrypts the data passing +// through with key. See the comments for NewAESGCMEncrypter about +// startingNonce. +func NewAESGCMDecrypter(key *storj.Key, startingNonce *AESGCMNonce, encryptedBlockSize int) (Transformer, error) { + block, err := aes.NewCipher(key[:]) + if err != nil { + return nil, Error.Wrap(err) + } + aesgcmDecrypt, err := cipher.NewGCM(block) + if err != nil { + return nil, Error.Wrap(err) + } + if encryptedBlockSize <= aesgcmDecrypt.Overhead() { + return nil, ErrInvalidConfig.New("encrypted block size %d too small", encryptedBlockSize) + } + return &aesgcmDecrypter{ + blockSize: encryptedBlockSize - aesgcmDecrypt.Overhead(), + key: key, + startingNonce: startingNonce, + overhead: aesgcmDecrypt.Overhead(), + aesgcm: aesgcmDecrypt, + }, nil +} +func (s *aesgcmDecrypter) InBlockSize() int { + return s.blockSize + s.overhead +} + +func (s *aesgcmDecrypter) OutBlockSize() int { + return s.blockSize +} + +func (s *aesgcmDecrypter) Transform(out, in []byte, blockNum int64) ([]byte, error) { + nonce, err := calcGCMNonce(s.startingNonce, blockNum) + if err != nil { + return nil, err + } + + plainData, err := s.aesgcm.Open(out, nonce[:], in, nil) + if err != nil { + return nil, ErrDecryptFailed.Wrap(err) + } + return plainData, nil +} + +// EncryptAESGCM encrypts byte data with a key and nonce. The cipher data is returned +func EncryptAESGCM(data []byte, key *storj.Key, nonce *AESGCMNonce) (cipherData []byte, err error) { + block, err := aes.NewCipher(key[:]) + if err != nil { + return []byte{}, Error.Wrap(err) + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return []byte{}, Error.Wrap(err) + } + cipherData = aesgcm.Seal(nil, nonce[:], data, nil) + return cipherData, nil +} + +// DecryptAESGCM decrypts byte data with a key and nonce. 
The plain data is returned +func DecryptAESGCM(cipherData []byte, key *storj.Key, nonce *AESGCMNonce) (data []byte, err error) { + if len(cipherData) == 0 { + return []byte{}, Error.New("empty cipher data") + } + block, err := aes.NewCipher(key[:]) + if err != nil { + return []byte{}, Error.Wrap(err) + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return []byte{}, Error.Wrap(err) + } + plainData, err := aesgcm.Open(nil, nonce[:], cipherData, nil) + if err != nil { + return []byte{}, ErrDecryptFailed.Wrap(err) + } + return plainData, nil +} diff --git a/vendor/storj.io/common/encryption/bits.go b/vendor/storj.io/common/encryption/bits.go new file mode 100644 index 000000000..7a1472542 --- /dev/null +++ b/vendor/storj.io/common/encryption/bits.go @@ -0,0 +1,30 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +// incrementBytes takes a byte slice buf and treats it like a little-endian +// encoded unsigned integer. it adds amount to it (which must be nonnegative) +// in place. if rollover happens (the most significant bytes don't fit +// anymore), truncated is true. +func incrementBytes(buf []byte, amount int64) (truncated bool, err error) { + if amount < 0 { + return false, Error.New("amount was negative") + } + + idx := 0 + for amount > 0 && idx < len(buf) { + var inc, prev byte + inc, amount = byte(amount), amount>>8 + + prev = buf[idx] + buf[idx] += inc + if buf[idx] < prev { + amount++ + } + + idx++ + } + + return amount != 0, nil +} diff --git a/vendor/storj.io/common/encryption/common.go b/vendor/storj.io/common/encryption/common.go new file mode 100644 index 000000000..08b344635 --- /dev/null +++ b/vendor/storj.io/common/encryption/common.go @@ -0,0 +1,18 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package encryption collects common cryptographic primitives needed for path and data encryption. 
+package encryption + +import ( + "github.com/zeebo/errs" +) + +// Error is the default encryption errs class +var Error = errs.Class("encryption error") + +// ErrDecryptFailed is the errs class when the decryption fails +var ErrDecryptFailed = errs.Class("decryption failed, check encryption key") + +// ErrInvalidConfig is the errs class for invalid configuration +var ErrInvalidConfig = errs.Class("invalid encryption configuration") diff --git a/vendor/storj.io/common/encryption/encryption.go b/vendor/storj.io/common/encryption/encryption.go new file mode 100644 index 000000000..278fe70d1 --- /dev/null +++ b/vendor/storj.io/common/encryption/encryption.go @@ -0,0 +1,156 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "crypto/hmac" + "crypto/sha512" + + "storj.io/common/storj" +) + +const ( + // AESGCMNonceSize is the size of an AES-GCM nonce + AESGCMNonceSize = 12 + // unit32Size is the number of bytes in the uint32 type + uint32Size = 4 +) + +// AESGCMNonce represents the nonce used by the AES-GCM protocol +type AESGCMNonce [AESGCMNonceSize]byte + +// ToAESGCMNonce returns the nonce as a AES-GCM nonce +func ToAESGCMNonce(nonce *storj.Nonce) *AESGCMNonce { + aes := new(AESGCMNonce) + copy((*aes)[:], nonce[:AESGCMNonceSize]) + return aes +} + +// Increment increments the nonce with the given amount +func Increment(nonce *storj.Nonce, amount int64) (truncated bool, err error) { + return incrementBytes(nonce[:], amount) +} + +// Encrypt encrypts data with the given cipher, key and nonce +func Encrypt(data []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) { + // Don't encrypt empty slice + if len(data) == 0 { + return []byte{}, nil + } + + switch cipher { + case storj.EncNull: + return data, nil + case storj.EncAESGCM: + return EncryptAESGCM(data, key, ToAESGCMNonce(nonce)) + case storj.EncSecretBox: + return EncryptSecretBox(data, key, nonce) + 
case storj.EncNullBase64URL: + return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") + default: + return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) + } +} + +// Decrypt decrypts cipherData with the given cipher, key and nonce +func Decrypt(cipherData []byte, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) { + // Don't decrypt empty slice + if len(cipherData) == 0 { + return []byte{}, nil + } + + switch cipher { + case storj.EncNull: + return cipherData, nil + case storj.EncAESGCM: + return DecryptAESGCM(cipherData, key, ToAESGCMNonce(nonce)) + case storj.EncSecretBox: + return DecryptSecretBox(cipherData, key, nonce) + case storj.EncNullBase64URL: + return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") + default: + return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) + } +} + +// NewEncrypter creates a Transformer using the given cipher, key and nonce to encrypt data passing through it +func NewEncrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { + switch cipher { + case storj.EncNull: + return &NoopTransformer{}, nil + case storj.EncAESGCM: + return NewAESGCMEncrypter(key, ToAESGCMNonce(startingNonce), encryptedBlockSize) + case storj.EncSecretBox: + return NewSecretboxEncrypter(key, startingNonce, encryptedBlockSize) + case storj.EncNullBase64URL: + return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") + default: + return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) + } +} + +// NewDecrypter creates a Transformer using the given cipher, key and nonce to decrypt data passing through it +func NewDecrypter(cipher storj.CipherSuite, key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { + switch cipher { + case storj.EncNull: + return 
&NoopTransformer{}, nil + case storj.EncAESGCM: + return NewAESGCMDecrypter(key, ToAESGCMNonce(startingNonce), encryptedBlockSize) + case storj.EncSecretBox: + return NewSecretboxDecrypter(key, startingNonce, encryptedBlockSize) + case storj.EncNullBase64URL: + return nil, ErrInvalidConfig.New("base64 encoding not supported for this operation") + default: + return nil, ErrInvalidConfig.New("encryption type %d is not supported", cipher) + } +} + +// EncryptKey encrypts keyToEncrypt with the given cipher, key and nonce +func EncryptKey(keyToEncrypt *storj.Key, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (storj.EncryptedPrivateKey, error) { + return Encrypt(keyToEncrypt[:], cipher, key, nonce) +} + +// DecryptKey decrypts keyToDecrypt with the given cipher, key and nonce +func DecryptKey(keyToDecrypt storj.EncryptedPrivateKey, cipher storj.CipherSuite, key *storj.Key, nonce *storj.Nonce) (*storj.Key, error) { + plainData, err := Decrypt(keyToDecrypt, cipher, key, nonce) + if err != nil { + return nil, err + } + + var decryptedKey storj.Key + copy(decryptedKey[:], plainData) + + return &decryptedKey, nil +} + +// DeriveKey derives new key from the given key and message using HMAC-SHA512 +func DeriveKey(key *storj.Key, message string) (*storj.Key, error) { + mac := hmac.New(sha512.New, key[:]) + _, err := mac.Write([]byte(message)) + if err != nil { + return nil, Error.Wrap(err) + } + + derived := new(storj.Key) + copy(derived[:], mac.Sum(nil)) + + return derived, nil +} + +// CalcEncryptedSize calculates what would be the size of the cipher data after +// encrypting data with dataSize using a Transformer with the given encryption +// parameters. 
+func CalcEncryptedSize(dataSize int64, parameters storj.EncryptionParameters) (int64, error) { + transformer, err := NewEncrypter(parameters.CipherSuite, new(storj.Key), new(storj.Nonce), int(parameters.BlockSize)) + if err != nil { + return 0, err + } + + inBlockSize := int64(transformer.InBlockSize()) + blocks := (dataSize + uint32Size + inBlockSize - 1) / inBlockSize + + encryptedSize := blocks * int64(transformer.OutBlockSize()) + + return encryptedSize, nil +} diff --git a/vendor/storj.io/common/encryption/pad.go b/vendor/storj.io/common/encryption/pad.go new file mode 100644 index 000000000..ff3468239 --- /dev/null +++ b/vendor/storj.io/common/encryption/pad.go @@ -0,0 +1,89 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "io/ioutil" + + "storj.io/common/ranger" + "storj.io/common/readcloser" +) + +// makePadding calculates how many bytes of padding are needed to fill +// an encryption block then creates a slice of zero bytes that size. +// The last byte of the padding slice contains the count of the total padding bytes added. +func makePadding(dataLen int64, blockSize int) []byte { + amount := dataLen + uint32Size + r := amount % int64(blockSize) + padding := uint32Size + if r > 0 { + padding += blockSize - int(r) + } + paddingBytes := bytes.Repeat([]byte{0}, padding) + binary.BigEndian.PutUint32(paddingBytes[padding-uint32Size:], uint32(padding)) + return paddingBytes +} + +// Pad takes a Ranger and returns another Ranger that is a multiple of +// blockSize in length. The return value padding is a convenience to report how +// much padding was added. 
+func Pad(data ranger.Ranger, blockSize int) ( + rr ranger.Ranger, padding int) { + paddingBytes := makePadding(data.Size(), blockSize) + return ranger.Concat(data, ranger.ByteRanger(paddingBytes)), len(paddingBytes) +} + +// Unpad takes a previously padded Ranger data source and returns an unpadded +// ranger, given the amount of padding. This is preferable to UnpadSlow if you +// can swing it. +func Unpad(data ranger.Ranger, padding int) (ranger.Ranger, error) { + return ranger.Subrange(data, 0, data.Size()-int64(padding)) +} + +// UnpadSlow is like Unpad, but does not require the amount of padding. +// UnpadSlow will have to do extra work to make up for this missing information. +func UnpadSlow(ctx context.Context, data ranger.Ranger) (_ ranger.Ranger, err error) { + r, err := data.Range(ctx, data.Size()-uint32Size, uint32Size) + if err != nil { + return nil, Error.Wrap(err) + } + var p [uint32Size]byte + _, err = io.ReadFull(r, p[:]) + if err != nil { + return nil, Error.Wrap(err) + } + return Unpad(data, int(binary.BigEndian.Uint32(p[:]))) +} + +// PadReader is like Pad but works on a basic Reader instead of a Ranger. 
+func PadReader(data io.ReadCloser, blockSize int) io.ReadCloser { + cr := newCountingReader(data) + return readcloser.MultiReadCloser(cr, + readcloser.LazyReadCloser(func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(makePadding(cr.N, blockSize))), nil + })) +} + +type countingReader struct { + R io.ReadCloser + N int64 +} + +func newCountingReader(r io.ReadCloser) *countingReader { + return &countingReader{R: r} +} + +func (cr *countingReader) Read(p []byte) (n int, err error) { + n, err = cr.R.Read(p) + cr.N += int64(n) + return n, err +} + +func (cr *countingReader) Close() error { + return cr.R.Close() +} diff --git a/vendor/storj.io/common/encryption/password.go b/vendor/storj.io/common/encryption/password.go new file mode 100644 index 000000000..f87ea99f2 --- /dev/null +++ b/vendor/storj.io/common/encryption/password.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "crypto/hmac" + "crypto/sha256" + + "github.com/zeebo/errs" + "golang.org/x/crypto/argon2" + + "storj.io/common/memory" + "storj.io/common/storj" +) + +func sha256hmac(key, data []byte) ([]byte, error) { + h := hmac.New(sha256.New, key) + if _, err := h.Write(data); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +// DeriveRootKey derives a root key for some path using the salt for the bucket and +// a password from the user. See the password key derivation design doc. +func DeriveRootKey(password, salt []byte, path storj.Path, argon2Threads uint8) (*storj.Key, error) { + mixedSalt, err := sha256hmac(password, salt) + if err != nil { + return nil, err + } + + pathSalt, err := sha256hmac(mixedSalt, []byte(path)) + if err != nil { + return nil, err + } + + // use a time of 1, 64MB of ram, and all of the cores. 
+ keyData := argon2.IDKey(password, pathSalt, 1, uint32(64*memory.MiB/memory.KiB), argon2Threads, 32) + if len(keyData) != len(storj.Key{}) { + return nil, errs.New("invalid output from argon2id") + } + + var key storj.Key + copy(key[:], keyData) + return &key, nil +} diff --git a/vendor/storj.io/common/encryption/path.go b/vendor/storj.io/common/encryption/path.go new file mode 100644 index 000000000..c923a8338 --- /dev/null +++ b/vendor/storj.io/common/encryption/path.go @@ -0,0 +1,510 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "crypto/hmac" + "crypto/sha512" + "encoding/base64" + "strings" + + "github.com/zeebo/errs" + + "storj.io/common/paths" + "storj.io/common/storj" +) + +var ( + emptyComponentPrefix = byte('\x01') + notEmptyComponentPrefix = byte('\x02') + emptyComponent = []byte{emptyComponentPrefix} + + escapeSlash = byte('\x2e') + escapeFF = byte('\xfe') + escape01 = byte('\x01') +) + +// EncryptPathWithStoreCipher encrypts the path looking up keys and the cipher from the +// provided store and bucket. +func EncryptPathWithStoreCipher(bucket string, path paths.Unencrypted, store *Store) ( + encPath paths.Encrypted, err error) { + + return encryptPath(bucket, path, nil, store) +} + +// EncryptPrefixWithStoreCipher encrypts the prefix using the provided cipher and looking up keys from the +// provided store and bucket. Because it is a prefix, it does not assume there is an empty component +// at the end of a path like "foo/bar/". 
+func EncryptPrefixWithStoreCipher(bucket string, path paths.Unencrypted, store *Store) ( + encPath paths.Encrypted, err error) { + + raw := path.Raw() + hasTrailing := strings.HasSuffix(raw, "/") + if hasTrailing { + path = paths.NewUnencrypted(raw[:len(raw)-1]) + } + encPath, err = encryptPath(bucket, path, nil, store) + if err != nil { + return encPath, err + } + if hasTrailing { + encPath = paths.NewEncrypted(encPath.Raw() + "/") + } + return encPath, nil +} + +// EncryptPath encrypts the path using the provided cipher and looking up keys from the +// provided store and bucket. +func EncryptPath(bucket string, path paths.Unencrypted, pathCipher storj.CipherSuite, store *Store) ( + encPath paths.Encrypted, err error) { + + return encryptPath(bucket, path, &pathCipher, store) +} + +func encryptPath(bucket string, path paths.Unencrypted, pathCipher *storj.CipherSuite, store *Store) ( + encPath paths.Encrypted, err error) { + + // Invalid paths map to invalid paths + if !path.Valid() { + return paths.Encrypted{}, nil + } + + _, consumed, base := store.LookupUnencrypted(bucket, path) + if base == nil { + return paths.Encrypted{}, errs.New("unable to find encryption base for: %s/%q", bucket, path) + } + + if pathCipher == nil { + pathCipher = &base.PathCipher + } + if store.EncryptionBypass { + *pathCipher = storj.EncNullBase64URL + } + if *pathCipher == storj.EncNull { + return paths.NewEncrypted(path.Raw()), nil + } + + remaining, ok := path.Consume(consumed) + if !ok { + return paths.Encrypted{}, errs.New("unable to encrypt bucket path: %s/%q", bucket, path) + } + + // if we're using the default base (meaning the default key), we need + // to include the bucket name in the path derivation. 
+ key := &base.Key + if base.Default { + key, err = derivePathKeyComponent(key, bucket) + if err != nil { + return paths.Encrypted{}, errs.Wrap(err) + } + } + + encrypted, err := EncryptPathRaw(remaining.Raw(), *pathCipher, key) + if err != nil { + return paths.Encrypted{}, errs.Wrap(err) + } + + var builder strings.Builder + _, _ = builder.WriteString(base.Encrypted.Raw()) + + if len(encrypted) > 0 { + if builder.Len() > 0 { + _ = builder.WriteByte('/') + } + _, _ = builder.WriteString(encrypted) + } + + return paths.NewEncrypted(builder.String()), nil +} + +// EncryptPathRaw encrypts the path using the provided key directly. EncryptPath should be +// preferred if possible. +func EncryptPathRaw(raw string, cipher storj.CipherSuite, key *storj.Key) (string, error) { + if cipher == storj.EncNull { + return raw, nil + } + + var builder strings.Builder + for iter, i := paths.NewIterator(raw), 0; !iter.Done(); i++ { + component := iter.Next() + encComponent, err := encryptPathComponent(component, cipher, key) + if err != nil { + return "", errs.Wrap(err) + } + key, err = derivePathKeyComponent(key, component) + if err != nil { + return "", errs.Wrap(err) + } + if i > 0 { + _ = builder.WriteByte('/') + } + + _, _ = builder.WriteString(encComponent) + } + return builder.String(), nil +} + +// DecryptPathWithStoreCipher decrypts the path looking up keys and the cipher from the +// provided store and bucket. +func DecryptPathWithStoreCipher(bucket string, path paths.Encrypted, store *Store) ( + encPath paths.Unencrypted, err error) { + + return decryptPath(bucket, path, nil, store) +} + +// DecryptPath decrypts the path using the provided cipher and looking up keys from the +// provided store and bucket. 
+func DecryptPath(bucket string, path paths.Encrypted, pathCipher storj.CipherSuite, store *Store) ( + encPath paths.Unencrypted, err error) { + + return decryptPath(bucket, path, &pathCipher, store) +} + +func decryptPath(bucket string, path paths.Encrypted, pathCipher *storj.CipherSuite, store *Store) ( + encPath paths.Unencrypted, err error) { + + // Invalid paths map to invalid paths + if !path.Valid() { + return paths.Unencrypted{}, nil + } + + _, consumed, base := store.LookupEncrypted(bucket, path) + if base == nil { + return paths.Unencrypted{}, errs.New("unable to find decryption base for: %q", path) + } + + if pathCipher == nil { + pathCipher = &base.PathCipher + } + if store.EncryptionBypass { + *pathCipher = storj.EncNullBase64URL + } + if *pathCipher == storj.EncNull { + return paths.NewUnencrypted(path.Raw()), nil + } + + remaining, ok := path.Consume(consumed) + if !ok { + return paths.Unencrypted{}, errs.New("unable to decrypt bucket path: %q", path) + } + + // if we're using the default base (meaning the default key), we need + // to include the bucket name in the path derivation. + key := &base.Key + if base.Default { + key, err = derivePathKeyComponent(key, bucket) + if err != nil { + return paths.Unencrypted{}, errs.Wrap(err) + } + } + + decrypted, err := DecryptPathRaw(remaining.Raw(), *pathCipher, key) + if err != nil { + return paths.Unencrypted{}, errs.Wrap(err) + } + + var builder strings.Builder + _, _ = builder.WriteString(base.Unencrypted.Raw()) + + if len(decrypted) > 0 { + if builder.Len() > 0 { + _ = builder.WriteByte('/') + } + + _, _ = builder.WriteString(decrypted) + } + + return paths.NewUnencrypted(builder.String()), nil +} + +// DecryptPathRaw decrypts the path using the provided key directly. DecryptPath should be +// preferred if possible. 
+func DecryptPathRaw(raw string, cipher storj.CipherSuite, key *storj.Key) (string, error) { + if cipher == storj.EncNull { + return raw, nil + } + + var builder strings.Builder + for iter, i := paths.NewIterator(raw), 0; !iter.Done(); i++ { + component := iter.Next() + unencComponent, err := decryptPathComponent(component, cipher, key) + if err != nil { + return "", errs.Wrap(err) + } + key, err = derivePathKeyComponent(key, unencComponent) + if err != nil { + return "", errs.Wrap(err) + } + if i > 0 { + _ = builder.WriteByte('/') + } + + _, _ = builder.WriteString(unencComponent) + } + return builder.String(), nil +} + +// DeriveContentKey returns the content key for the passed in path by looking up +// the appropriate base key from the store and bucket and deriving the rest. +func DeriveContentKey(bucket string, path paths.Unencrypted, store *Store) (key *storj.Key, err error) { + key, err = DerivePathKey(bucket, path, store) + if err != nil { + return nil, errs.Wrap(err) + } + key, err = DeriveKey(key, "content") + return key, errs.Wrap(err) +} + +// DerivePathKey returns the path key for the passed in path by looking up the +// appropriate base key from the store and bucket and deriving the rest. +func DerivePathKey(bucket string, path paths.Unencrypted, store *Store) (key *storj.Key, err error) { + _, consumed, base := store.LookupUnencrypted(bucket, path) + if base == nil { + return nil, errs.New("unable to find encryption base for: %s/%q", bucket, path) + } + + // If asking for the key at the bucket, do that and return. + if !path.Valid() { + // if we're using the default base (meaning the default key), we need + // to include the bucket name in the path derivation. 
+ key = &base.Key + if base.Default { + key, err = derivePathKeyComponent(&base.Key, bucket) + if err != nil { + return nil, errs.Wrap(err) + } + } + return key, nil + } + + remaining, ok := path.Consume(consumed) + if !ok { + return nil, errs.New("unable to derive path key for: %s/%q", bucket, path) + } + + // if we're using the default base (meaning the default key), we need + // to include the bucket name in the path derivation. + key = &base.Key + if base.Default { + key, err = derivePathKeyComponent(key, bucket) + if err != nil { + return nil, errs.Wrap(err) + } + } + + for iter := remaining.Iterator(); !iter.Done(); { + key, err = derivePathKeyComponent(key, iter.Next()) + if err != nil { + return nil, errs.Wrap(err) + } + } + return key, nil +} + +// derivePathKeyComponent derives a new key from the provided one using the component. It +// should be preferred over DeriveKey when adding path components as it performs the +// necessary transformation to the component. +func derivePathKeyComponent(key *storj.Key, component string) (*storj.Key, error) { + return DeriveKey(key, "path:"+component) +} + +// encryptPathComponent encrypts a single path component with the provided cipher and key. +func encryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key) (string, error) { + + if cipher == storj.EncNullBase64URL { + decoded, err := base64.URLEncoding.DecodeString(comp) + if err != nil { + return "", Error.New("invalid base64 data: %v", err) + } + return string(decoded), nil + } + + // derive the key for the next path component. this is so that + // every encrypted component has a unique nonce. 
+ derivedKey, err := derivePathKeyComponent(key, comp) + if err != nil { + return "", err + } + + // use the derived key to derive the nonce + mac := hmac.New(sha512.New, derivedKey[:]) + _, err = mac.Write([]byte("nonce")) + if err != nil { + return "", Error.Wrap(err) + } + + nonce := new(storj.Nonce) + copy(nonce[:], mac.Sum(nil)) + + // encrypt the path components with the parent's key and the derived nonce + cipherText, err := Encrypt([]byte(comp), cipher, key, nonce) + if err != nil { + return "", Error.Wrap(err) + } + + nonceSize := storj.NonceSize + if cipher == storj.EncAESGCM { + nonceSize = AESGCMNonceSize + } + + // keep the nonce together with the cipher text + return string(encodeSegment(append(nonce[:nonceSize], cipherText...))), nil +} + +// decryptPathComponent decrypts a single path component with the provided cipher and key. +func decryptPathComponent(comp string, cipher storj.CipherSuite, key *storj.Key) (string, error) { + if comp == "" { + return "", nil + } + + if cipher == storj.EncNullBase64URL { + return base64.URLEncoding.EncodeToString([]byte(comp)), nil + } + + data, err := decodeSegment([]byte(comp)) + if err != nil { + return "", Error.Wrap(err) + } + + nonceSize := storj.NonceSize + if cipher == storj.EncAESGCM { + nonceSize = AESGCMNonceSize + } + if len(data) < nonceSize || nonceSize < 0 { + return "", errs.New("component did not contain enough nonce bytes") + } + + // extract the nonce from the cipher text + nonce := new(storj.Nonce) + copy(nonce[:], data[:nonceSize]) + + decrypted, err := Decrypt(data[nonceSize:], cipher, key, nonce) + if err != nil { + return "", Error.Wrap(err) + } + + return string(decrypted), nil +} + +// encodeSegment encodes segment according to specific rules +// The empty path component is encoded as `\x01` +// Any other path component is encoded as `\x02 + escape(component)` +// +// `\x2e` escapes to `\x2e\x01` +// `\x2f` escapes to `\x2e\x02` +// `\xfe` escapes to `\xfe\x01` +// `\xff` escapes to 
`\xfe\x02` +// `\x00` escapes to `\x01\x01` +// `\x01` escapes to `\x01\x02 +// for more details see docs/design/path-component-encoding.md +func encodeSegment(segment []byte) []byte { + if len(segment) == 0 { + return emptyComponent + } + + result := make([]byte, 0, len(segment)*2+1) + result = append(result, notEmptyComponentPrefix) + for i := 0; i < len(segment); i++ { + switch segment[i] { + case escapeSlash: + result = append(result, []byte{escapeSlash, 1}...) + case escapeSlash + 1: + result = append(result, []byte{escapeSlash, 2}...) + case escapeFF: + result = append(result, []byte{escapeFF, 1}...) + case escapeFF + 1: + result = append(result, []byte{escapeFF, 2}...) + case escape01 - 1: + result = append(result, []byte{escape01, 1}...) + case escape01: + result = append(result, []byte{escape01, 2}...) + default: + result = append(result, segment[i]) + } + } + return result +} + +func decodeSegment(segment []byte) ([]byte, error) { + err := validateEncodedSegment(segment) + if err != nil { + return []byte{}, err + } + if segment[0] == emptyComponentPrefix { + return []byte{}, nil + } + + currentIndex := 0 + for i := 1; i < len(segment); i++ { + switch { + case i == len(segment)-1: + segment[currentIndex] = segment[i] + case segment[i] == escapeSlash || segment[i] == escapeFF: + segment[currentIndex] = segment[i] + segment[i+1] - 1 + i++ + case segment[i] == escape01: + segment[currentIndex] = segment[i+1] - 1 + i++ + default: + segment[currentIndex] = segment[i] + } + currentIndex++ + } + return segment[:currentIndex], nil +} + +// validateEncodedSegment checks if: +// * The last byte/sequence is not in {escape1, escape2, escape3} +// * Any byte after an escape character is \x01 or \x02 +// * It does not contain any characters in {\x00, \xff, \x2f} +// * It is non-empty +// * It begins with a character in {\x01, \x02} +func validateEncodedSegment(segment []byte) error { + switch { + case len(segment) == 0: + return errs.New("encoded segment cannot be 
empty") + case segment[0] != emptyComponentPrefix && segment[0] != notEmptyComponentPrefix: + return errs.New("invalid segment prefix") + case segment[0] == emptyComponentPrefix && len(segment) > 1: + return errs.New("segment encoded as empty but contains data") + case segment[0] == notEmptyComponentPrefix && len(segment) == 1: + return errs.New("segment encoded as not empty but doesn't contain data") + } + + if len(segment) == 1 { + return nil + } + + index := 1 + for ; index < len(segment)-1; index++ { + if isEscapeByte(segment[index]) { + if segment[index+1] == 1 || segment[index+1] == 2 { + index++ + continue + } + return errs.New("invalid escape sequence") + } + if isDisallowedByte(segment[index]) { + return errs.New("invalid character in segment") + } + } + if index == len(segment)-1 { + if isEscapeByte(segment[index]) { + return errs.New("invalid escape sequence") + } + if isDisallowedByte(segment[index]) { + return errs.New("invalid character") + } + } + + return nil +} + +func isEscapeByte(b byte) bool { + return b == escapeSlash || b == escapeFF || b == escape01 +} + +func isDisallowedByte(b byte) bool { + return b == 0 || b == '\xff' || b == '/' +} diff --git a/vendor/storj.io/common/encryption/secretbox.go b/vendor/storj.io/common/encryption/secretbox.go new file mode 100644 index 000000000..de48da293 --- /dev/null +++ b/vendor/storj.io/common/encryption/secretbox.go @@ -0,0 +1,119 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "golang.org/x/crypto/nacl/secretbox" + + "storj.io/common/storj" +) + +type secretboxEncrypter struct { + blockSize int + key *storj.Key + startingNonce *storj.Nonce +} + +// NewSecretboxEncrypter returns a Transformer that encrypts the data passing +// through with key. 
+// +// startingNonce is treated as a big-endian encoded unsigned +// integer, and as blocks pass through, their block number and the starting +// nonce is added together to come up with that block's nonce. Encrypting +// different data with the same key and the same nonce is a huge security +// issue. It's safe to always encode new data with a random key and random +// startingNonce. The monotonically-increasing nonce (that rolls over) is to +// protect against data reordering. +// +// When in doubt, generate a new key from crypto/rand and a startingNonce +// from crypto/rand as often as possible. +func NewSecretboxEncrypter(key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { + if encryptedBlockSize <= secretbox.Overhead { + return nil, ErrInvalidConfig.New("encrypted block size %d too small", encryptedBlockSize) + } + return &secretboxEncrypter{ + blockSize: encryptedBlockSize - secretbox.Overhead, + key: key, + startingNonce: startingNonce, + }, nil +} + +func (s *secretboxEncrypter) InBlockSize() int { + return s.blockSize +} + +func (s *secretboxEncrypter) OutBlockSize() int { + return s.blockSize + secretbox.Overhead +} + +func calcNonce(startingNonce *storj.Nonce, blockNum int64) (rv *storj.Nonce, err error) { + rv = new(storj.Nonce) + if copy(rv[:], (*startingNonce)[:]) != len(rv) { + return rv, Error.New("didn't copy memory?!") + } + _, err = incrementBytes(rv[:], blockNum) + return rv, err +} + +func (s *secretboxEncrypter) Transform(out, in []byte, blockNum int64) ([]byte, error) { + nonce, err := calcNonce(s.startingNonce, blockNum) + if err != nil { + return nil, err + } + return secretbox.Seal(out, in, nonce.Raw(), s.key.Raw()), nil +} + +type secretboxDecrypter struct { + blockSize int + key *storj.Key + startingNonce *storj.Nonce +} + +// NewSecretboxDecrypter returns a Transformer that decrypts the data passing +// through with key. See the comments for NewSecretboxEncrypter about +// startingNonce. 
+func NewSecretboxDecrypter(key *storj.Key, startingNonce *storj.Nonce, encryptedBlockSize int) (Transformer, error) { + if encryptedBlockSize <= secretbox.Overhead { + return nil, ErrInvalidConfig.New("encrypted block size %d too small", encryptedBlockSize) + } + return &secretboxDecrypter{ + blockSize: encryptedBlockSize - secretbox.Overhead, + key: key, + startingNonce: startingNonce, + }, nil +} + +func (s *secretboxDecrypter) InBlockSize() int { + return s.blockSize + secretbox.Overhead +} + +func (s *secretboxDecrypter) OutBlockSize() int { + return s.blockSize +} + +func (s *secretboxDecrypter) Transform(out, in []byte, blockNum int64) ([]byte, error) { + nonce, err := calcNonce(s.startingNonce, blockNum) + if err != nil { + return nil, err + } + rv, success := secretbox.Open(out, in, nonce.Raw(), s.key.Raw()) + if !success { + return nil, ErrDecryptFailed.New("") + } + return rv, nil +} + +// EncryptSecretBox encrypts byte data with a key and nonce. The cipher data is returned +func EncryptSecretBox(data []byte, key *storj.Key, nonce *storj.Nonce) (cipherData []byte, err error) { + return secretbox.Seal(nil, data, nonce.Raw(), key.Raw()), nil +} + +// DecryptSecretBox decrypts byte data with a key and nonce. The plain data is returned +func DecryptSecretBox(cipherData []byte, key *storj.Key, nonce *storj.Nonce) (data []byte, err error) { + data, success := secretbox.Open(nil, cipherData, nonce.Raw(), key.Raw()) + if !success { + return nil, ErrDecryptFailed.New("") + } + return data, nil +} diff --git a/vendor/storj.io/common/encryption/store.go b/vendor/storj.io/common/encryption/store.go new file mode 100644 index 000000000..4b507220e --- /dev/null +++ b/vendor/storj.io/common/encryption/store.go @@ -0,0 +1,313 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package encryption + +import ( + "github.com/zeebo/errs" + + "storj.io/common/paths" + "storj.io/common/storj" +) + +// The Store allows one to find the matching most encrypted key and path for +// some unencrypted path. It also reports a mapping of encrypted to unencrypted paths +// at the searched for unencrypted path. +// +// For example, if the Store contains the mappings +// +// b1, u1/u2/u3 => +// b1, u1/u2/u3/u4 => +// b1, u1/u5 => +// b1, u6 => +// b1, u6/u7/u8 => +// b2, u1 => +// +// Then the following lookups have outputs +// +// b1, u1 => <{e2:u2, e5:u5}, u1, nil> +// b1, u1/u2/u3 => <{e4:u4}, u1/u2/u3, > +// b1, u1/u2/u3/u6 => <{}, u1/u2/u3/, > +// b1, u1/u2/u3/u4 => <{}, u1/u2/u3/u4, > +// b1, u6/u7 => <{e8:u8}, u6/, > +// b2, u1 => <{}, u1, > +type Store struct { + roots map[string]*node + defaultKey *storj.Key + defaultPathCipher storj.CipherSuite + + // EncryptionBypass makes it so we can interoperate with + // the network without having encryption keys. paths will be encrypted but + // base64-encoded, and certain metadata will be unable to be retrieved. + EncryptionBypass bool +} + +// node is a node in the Store graph. It may contain an encryption key and encrypted path, +// a list of children nodes, and data to ensure a bijection between encrypted and unencrypted +// path entries. +type node struct { + unenc map[string]*node // unenc => node + unencMap map[string]string // unenc => enc + enc map[string]*node // enc => node + encMap map[string]string // enc => unenc + base *Base +} + +// Base represents a key with which to derive further keys at some encrypted/unencrypted path. +type Base struct { + Unencrypted paths.Unencrypted + Encrypted paths.Encrypted + Key storj.Key + PathCipher storj.CipherSuite + Default bool +} + +// clone returns a copy of the Base. The implementation can be simple because the +// types of its fields do not contain any references. 
+func (b *Base) clone() *Base { + if b == nil { + return nil + } + bc := *b + return &bc +} + +// NewStore constructs a Store. +func NewStore() *Store { + return &Store{roots: make(map[string]*node)} +} + +// newNode constructs a node. +func newNode() *node { + return &node{ + unenc: make(map[string]*node), + unencMap: make(map[string]string), + enc: make(map[string]*node), + encMap: make(map[string]string), + } +} + +// SetDefaultKey adds a default key to be returned for any lookup that does not match a bucket. +func (s *Store) SetDefaultKey(defaultKey *storj.Key) { + s.defaultKey = defaultKey +} + +// GetDefaultKey returns the default key, or nil if none has been set. +func (s *Store) GetDefaultKey() *storj.Key { + return s.defaultKey +} + +// SetDefaultPathCipher adds a default path cipher to be returned for any lookup that does not match a bucket. +func (s *Store) SetDefaultPathCipher(defaultPathCipher storj.CipherSuite) { + s.defaultPathCipher = defaultPathCipher +} + +// GetDefaultPathCipher returns the default path cipher, or EncUnspecified if none has been set. +func (s *Store) GetDefaultPathCipher() storj.CipherSuite { + return s.defaultPathCipher +} + +// Add creates a mapping from the unencrypted path to the encrypted path and key. It uses the current default cipher. +func (s *Store) Add(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key) error { + return s.AddWithCipher(bucket, unenc, enc, key, s.defaultPathCipher) +} + +// AddWithCipher creates a mapping from the unencrypted path to the encrypted path and key with the given cipher. +func (s *Store) AddWithCipher(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key, pathCipher storj.CipherSuite) error { + root, ok := s.roots[bucket] + if !ok { + root = newNode() + } + + // Perform the addition starting at the root node. 
+ if err := root.add(unenc.Iterator(), enc.Iterator(), &Base{ + Unencrypted: unenc, + Encrypted: enc, + Key: key, + PathCipher: pathCipher, + }); err != nil { + return err + } + + // only update the root for the bucket if the add was successful. + s.roots[bucket] = root + return nil +} + +// add places the paths and base into the node tree structure. +func (n *node) add(unenc, enc paths.Iterator, base *Base) error { + if unenc.Done() != enc.Done() { + return errs.New("encrypted and unencrypted paths had different number of components") + } + + // If we're done walking the paths, this node must have the provided base. + if unenc.Done() { + n.base = base + return nil + } + + // Walk to the next parts and ensure they're consistent with previous additions. + unencPart, encPart := unenc.Next(), enc.Next() + if exUnencPart, ok := n.encMap[encPart]; ok && exUnencPart != unencPart { + return errs.New("conflicting encrypted parts for unencrypted path") + } + if exEncPart, ok := n.unencMap[unencPart]; ok && exEncPart != encPart { + return errs.New("conflicting encrypted parts for unencrypted path") + } + + // Look up the child node. Since we're sure the unenc and enc mappings are + // consistent, we can look it up in one map and unconditionally insert it + // into both maps if necessary. + child, ok := n.unenc[unencPart] + if !ok { + child = newNode() + } + + // Recurse to the next node in the tree. + if err := child.add(unenc, enc, base); err != nil { + return err + } + + // Only add to the maps if the child add was successful. + n.unencMap[unencPart] = encPart + n.encMap[encPart] = unencPart + n.unenc[unencPart] = child + n.enc[encPart] = child + return nil +} + +// LookupUnencrypted finds the matching most unencrypted path added to the Store, reports how +// much of the path matched, any known unencrypted paths at the requested path, and if a key +// and encrypted path exists for some prefix of the unencrypted path. 
+func (s *Store) LookupUnencrypted(bucket string, path paths.Unencrypted) ( + revealed map[string]string, consumed paths.Unencrypted, base *Base) { + + root, ok := s.roots[bucket] + if ok { + var rawConsumed string + revealed, rawConsumed, base = root.lookup(path.Iterator(), "", nil, true) + consumed = paths.NewUnencrypted(rawConsumed) + } + if base == nil && s.defaultKey != nil { + return nil, paths.Unencrypted{}, s.defaultBase() + } + return revealed, consumed, base.clone() +} + +// LookupEncrypted finds the matching most encrypted path added to the Store, reports how +// much of the path matched, any known encrypted paths at the requested path, and if a key +// an encrypted path exists for some prefix of the encrypted path. +func (s *Store) LookupEncrypted(bucket string, path paths.Encrypted) ( + revealed map[string]string, consumed paths.Encrypted, base *Base) { + + root, ok := s.roots[bucket] + if ok { + var rawConsumed string + revealed, rawConsumed, base = root.lookup(path.Iterator(), "", nil, false) + consumed = paths.NewEncrypted(rawConsumed) + } + if base == nil && s.defaultKey != nil { + return nil, paths.Encrypted{}, s.defaultBase() + } + return revealed, consumed, base.clone() +} + +func (s *Store) defaultBase() *Base { + return &Base{ + Key: *s.defaultKey, + PathCipher: s.defaultPathCipher, + Default: true, + } +} + +// lookup searches for the path in the node tree structure. +func (n *node) lookup(path paths.Iterator, bestConsumed string, bestBase *Base, unenc bool) ( + map[string]string, string, *Base) { + + // Keep track of the best match so far. + if n.base != nil || bestBase == nil { + bestBase, bestConsumed = n.base, path.Consumed() + } + + // Pick the tree we're walking down based on the unenc bool. + revealed, children := n.unencMap, n.enc + if unenc { + revealed, children = n.encMap, n.unenc + } + + // If we're done walking the path, then return our best match along with the + // revealed paths at this node. 
+ if path.Done() { + return revealed, bestConsumed, bestBase + } + + // Walk to the next node in the tree. If there is no node, then report our best match. + child, ok := children[path.Next()] + if !ok { + return nil, bestConsumed, bestBase + } + + // Recurse to the next node in the tree. + return child.lookup(path, bestConsumed, bestBase, unenc) +} + +// Iterate executes the callback with every value that has been Added to the Store. +// NOTE: This call is lossy! Please upgrade any code paths to use IterateWithCipher! +func (s *Store) Iterate(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key) error) error { + for bucket, root := range s.roots { + if err := root.iterate(fn, bucket); err != nil { + return err + } + } + return nil +} + +// iterate calls the callback if the node has a base, and recurses to its children. +func (n *node) iterate(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key) error, bucket string) error { + if n.base != nil { + err := fn(bucket, n.base.Unencrypted, n.base.Encrypted, n.base.Key) + if err != nil { + return err + } + } + + // recurse down only the unenc map, as the enc map should be the same. + for _, child := range n.unenc { + err := child.iterate(fn, bucket) + if err != nil { + return err + } + } + + return nil +} + +// IterateWithCipher executes the callback with every value that has been Added to the Store. +func (s *Store) IterateWithCipher(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key, storj.CipherSuite) error) error { + for bucket, root := range s.roots { + if err := root.iterateWithCipher(fn, bucket); err != nil { + return err + } + } + return nil +} + +// iterateWithCipher calls the callback if the node has a base, and recurses to its children. 
+func (n *node) iterateWithCipher(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key, storj.CipherSuite) error, bucket string) error { + if n.base != nil { + err := fn(bucket, n.base.Unencrypted, n.base.Encrypted, n.base.Key, n.base.PathCipher) + if err != nil { + return err + } + } + + // recurse down only the unenc map, as the enc map should be the same. + for _, child := range n.unenc { + err := child.iterateWithCipher(fn, bucket) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/storj.io/common/encryption/transform.go b/vendor/storj.io/common/encryption/transform.go new file mode 100644 index 000000000..337f7f864 --- /dev/null +++ b/vendor/storj.io/common/encryption/transform.go @@ -0,0 +1,188 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package encryption + +import ( + "bytes" + "context" + "io" + "io/ioutil" + + "github.com/spacemonkeygo/monkit/v3" + + "storj.io/common/ranger" + "storj.io/common/readcloser" +) + +var mon = monkit.Package() + +// A Transformer is a data transformation that may change the size of the blocks +// of data it operates on in a deterministic fashion. 
+type Transformer interface { + InBlockSize() int // The block size prior to transformation + OutBlockSize() int // The block size after transformation + Transform(out, in []byte, blockNum int64) ([]byte, error) +} + +type transformedReader struct { + r io.ReadCloser + t Transformer + blockNum int64 + inbuf []byte + outbuf []byte + expectedSize int64 + bytesRead int +} + +// NoopTransformer is a dummy Transformer that passes data through without modifying it +type NoopTransformer struct{} + +// InBlockSize is 1 +func (t *NoopTransformer) InBlockSize() int { + return 1 +} + +// OutBlockSize is 1 +func (t *NoopTransformer) OutBlockSize() int { + return 1 +} + +// Transform returns the input without modification +func (t *NoopTransformer) Transform(out, in []byte, blockNum int64) ([]byte, error) { + return append(out, in...), nil +} + +// TransformReader applies a Transformer to a Reader. startingBlockNum should +// probably be 0 unless you know you're already starting at a block offset. +func TransformReader(r io.ReadCloser, t Transformer, + startingBlockNum int64) io.ReadCloser { + return &transformedReader{ + r: r, + t: t, + blockNum: startingBlockNum, + inbuf: make([]byte, t.InBlockSize()), + outbuf: make([]byte, 0, t.OutBlockSize()), + } +} + +// TransformReaderSize creates a TransformReader with expected size, +// i.e. the number of bytes that is expected to be read from this reader. +// If less than the expected bytes are read, the reader will return +// io.ErrUnexpectedEOF instead of io.EOF. 
+func TransformReaderSize(r io.ReadCloser, t Transformer, + startingBlockNum int64, expectedSize int64) io.ReadCloser { + return &transformedReader{ + r: r, + t: t, + blockNum: startingBlockNum, + inbuf: make([]byte, t.InBlockSize()), + outbuf: make([]byte, 0, t.OutBlockSize()), + expectedSize: expectedSize, + } +} + +func (t *transformedReader) Read(p []byte) (n int, err error) { + if len(t.outbuf) == 0 { + // If there's no more buffered data left, let's fill the buffer with + // the next block + b, err := io.ReadFull(t.r, t.inbuf) + t.bytesRead += b + if err == io.EOF && int64(t.bytesRead) < t.expectedSize { + return 0, io.ErrUnexpectedEOF + } else if err != nil { + return 0, err + } + t.outbuf, err = t.t.Transform(t.outbuf, t.inbuf, t.blockNum) + if err != nil { + if err == io.EOF { + return 0, err + } + return 0, Error.Wrap(err) + } + t.blockNum++ + } + + // return as much as we can from the current buffered block + n = copy(p, t.outbuf) + // slide the uncopied data to the beginning of the buffer + copy(t.outbuf, t.outbuf[n:]) + // resize the buffer + t.outbuf = t.outbuf[:len(t.outbuf)-n] + return n, nil +} + +func (t *transformedReader) Close() error { + return t.r.Close() +} + +type transformedRanger struct { + rr ranger.Ranger + t Transformer +} + +// Transform will apply a Transformer to a Ranger. +func Transform(rr ranger.Ranger, t Transformer) (ranger.Ranger, error) { + if rr.Size()%int64(t.InBlockSize()) != 0 { + return nil, Error.New("invalid transformer and range reader combination." 
+ + "the range reader size is not a multiple of the block size") + } + return &transformedRanger{rr: rr, t: t}, nil +} + +func (t *transformedRanger) Size() int64 { + blocks := t.rr.Size() / int64(t.t.InBlockSize()) + return blocks * int64(t.t.OutBlockSize()) +} + +// CalcEncompassingBlocks is a useful helper function that, given an offset, +// length, and blockSize, will tell you which blocks contain the requested +// offset and length +func CalcEncompassingBlocks(offset, length int64, blockSize int) ( + firstBlock, blockCount int64) { + firstBlock = offset / int64(blockSize) + if length <= 0 { + return firstBlock, 0 + } + lastBlock := (offset + length) / int64(blockSize) + if (offset+length)%int64(blockSize) == 0 { + return firstBlock, lastBlock - firstBlock + } + return firstBlock, 1 + lastBlock - firstBlock +} + +func (t *transformedRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + + // Range may not have been called for block-aligned offsets and lengths, so + // let's figure out which blocks encompass the request + firstBlock, blockCount := CalcEncompassingBlocks( + offset, length, t.t.OutBlockSize()) + // If block count is 0, there is nothing to transform, so return a dumb + // reader that will just return io.EOF on read + if blockCount == 0 { + return ioutil.NopCloser(bytes.NewReader(nil)), nil + } + // okay, now let's get the range on the underlying ranger for those blocks + // and then Transform it. + r, err := t.rr.Range(ctx, + firstBlock*int64(t.t.InBlockSize()), + blockCount*int64(t.t.InBlockSize())) + if err != nil { + return nil, err + } + tr := TransformReaderSize(r, t.t, firstBlock, blockCount*int64(t.t.InBlockSize())) + // the range we got potentially includes more than we wanted. 
if the + // offset started past the beginning of the first block, we need to + // swallow the first few bytes + _, err = io.CopyN(ioutil.Discard, tr, + offset-firstBlock*int64(t.t.OutBlockSize())) + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + return nil, Error.Wrap(err) + } + // the range might have been too long. only return what was requested + return readcloser.LimitReadCloser(tr, length), nil +} diff --git a/vendor/storj.io/common/errs2/collect.go b/vendor/storj.io/common/errs2/collect.go new file mode 100644 index 000000000..c9d6669b0 --- /dev/null +++ b/vendor/storj.io/common/errs2/collect.go @@ -0,0 +1,40 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package errs2 + +import ( + "time" + + "github.com/zeebo/errs" +) + +// Collect returns first error from channel and all errors that happen within duration +func Collect(errch chan error, duration time.Duration) error { + errch = discardNil(errch) + errlist := []error{<-errch} + timeout := time.After(duration) + for { + select { + case err := <-errch: + errlist = append(errlist, err) + case <-timeout: + return errs.Combine(errlist...) + } + } +} + +// discard nil errors that are returned from services +func discardNil(ch chan error) chan error { + r := make(chan error) + go func() { + for err := range ch { + if err == nil { + continue + } + r <- err + } + close(r) + }() + return r +} diff --git a/vendor/storj.io/common/errs2/doc.go b/vendor/storj.io/common/errs2/doc.go new file mode 100644 index 000000000..c9313aaae --- /dev/null +++ b/vendor/storj.io/common/errs2/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package errs2 collects common error handling functions. 
+package errs2 diff --git a/vendor/storj.io/common/errs2/group.go b/vendor/storj.io/common/errs2/group.go new file mode 100644 index 000000000..8cbad5f67 --- /dev/null +++ b/vendor/storj.io/common/errs2/group.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package errs2 + +import "sync" + +// Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +type Group struct { + wg sync.WaitGroup + mu sync.Mutex + errors []error +} + +// Go calls the given function in a new goroutine. +func (group *Group) Go(f func() error) { + group.wg.Add(1) + + go func() { + defer group.wg.Done() + + if err := f(); err != nil { + group.mu.Lock() + defer group.mu.Unlock() + + group.errors = append(group.errors, err) + } + }() +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns all errors (if any) from them. +func (group *Group) Wait() []error { + group.wg.Wait() + + return group.errors +} diff --git a/vendor/storj.io/common/errs2/ignore.go b/vendor/storj.io/common/errs2/ignore.go new file mode 100644 index 000000000..7f3c38e7d --- /dev/null +++ b/vendor/storj.io/common/errs2/ignore.go @@ -0,0 +1,30 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package errs2 + +import ( + "context" + + "github.com/zeebo/errs" + + "storj.io/common/internal/grpchook" + "storj.io/common/rpc/rpcstatus" +) + +// IsCanceled returns true, when the error is a cancellation. +func IsCanceled(err error) bool { + return errs.IsFunc(err, func(err error) bool { + return err == context.Canceled || + grpchook.IsErrServerStopped(err) || + rpcstatus.Code(err) == rpcstatus.Canceled + }) +} + +// IgnoreCanceled returns nil, when the operation was about canceling. 
+func IgnoreCanceled(err error) error { + if IsCanceled(err) { + return nil + } + return err +} diff --git a/vendor/storj.io/common/errs2/rpc.go b/vendor/storj.io/common/errs2/rpc.go new file mode 100644 index 000000000..ae19ed019 --- /dev/null +++ b/vendor/storj.io/common/errs2/rpc.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package errs2 + +import ( + "github.com/zeebo/errs" + + "storj.io/common/rpc/rpcstatus" +) + +// IsRPC checks if err contains an RPC error with the given status code. +func IsRPC(err error, code rpcstatus.StatusCode) bool { + return errs.IsFunc(err, func(err error) bool { + return rpcstatus.Code(err) == code + }) +} diff --git a/vendor/storj.io/common/errs2/sanatizer.go b/vendor/storj.io/common/errs2/sanatizer.go new file mode 100644 index 000000000..6d33aeecf --- /dev/null +++ b/vendor/storj.io/common/errs2/sanatizer.go @@ -0,0 +1,52 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package errs2 + +import ( + "github.com/zeebo/errs" + "go.uber.org/zap" + + "storj.io/common/rpc/rpcstatus" +) + +// CodeMap is used to apply the correct rpc status code to error classes. +type CodeMap map[*errs.Class]rpcstatus.StatusCode + +// LoggingSanitizer consolidates logging of original errors with sanitization of internal errors. +type LoggingSanitizer struct { + wrapper *errs.Class + log *zap.Logger + codeMap CodeMap +} + +// NewLoggingSanitizer creates a new LoggingSanitizer. +func NewLoggingSanitizer(wrapper *errs.Class, log *zap.Logger, codeMap CodeMap) *LoggingSanitizer { + return &LoggingSanitizer{ + wrapper: wrapper, + log: log, + codeMap: codeMap, + } +} + +// Error logs the message and error to the logger and returns the sanitized error. 
+func (sanitizer *LoggingSanitizer) Error(msg string, err error) error { + if sanitizer.wrapper != nil { + err = sanitizer.wrapper.Wrap(err) + } + + if sanitizer.log != nil { + sanitizer.log.Error(msg, zap.Error(err)) + } + + for errClass, code := range sanitizer.codeMap { + if errClass.Has(err) { + return rpcstatus.Error(code, err.Error()) + } + } + + if sanitizer.wrapper == nil { + return rpcstatus.Error(rpcstatus.Internal, msg) + } + return rpcstatus.Error(rpcstatus.Internal, sanitizer.wrapper.New(msg).Error()) +} diff --git a/vendor/storj.io/common/fpath/atomic.go b/vendor/storj.io/common/fpath/atomic.go new file mode 100644 index 000000000..49f3defbb --- /dev/null +++ b/vendor/storj.io/common/fpath/atomic.go @@ -0,0 +1,42 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package fpath + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/zeebo/errs" +) + +// AtomicWriteFile is a helper to atomically write the data to the outfile. +func AtomicWriteFile(outfile string, data []byte, mode os.FileMode) (err error) { + // TODO: provide better atomicity guarantees, like fsyncing the parent + // directory and, on windows, using MoveFileEx with MOVEFILE_WRITE_THROUGH. 
+ + fh, err := ioutil.TempFile(filepath.Dir(outfile), filepath.Base(outfile)) + if err != nil { + return errs.Wrap(err) + } + defer func() { + if err != nil { + err = errs.Combine(err, fh.Close()) + err = errs.Combine(err, os.Remove(fh.Name())) + } + }() + if _, err := fh.Write(data); err != nil { + return errs.Wrap(err) + } + if err := fh.Sync(); err != nil { + return errs.Wrap(err) + } + if err := fh.Close(); err != nil { + return errs.Wrap(err) + } + if err := os.Rename(fh.Name(), outfile); err != nil { + return errs.Wrap(err) + } + return nil +} diff --git a/vendor/storj.io/common/fpath/doc.go b/vendor/storj.io/common/fpath/doc.go new file mode 100644 index 000000000..8971eab8c --- /dev/null +++ b/vendor/storj.io/common/fpath/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package fpath implements cross-platform file and object path handling. +package fpath diff --git a/vendor/storj.io/common/fpath/editor.go b/vendor/storj.io/common/fpath/editor.go new file mode 100644 index 000000000..2788a2819 --- /dev/null +++ b/vendor/storj.io/common/fpath/editor.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package fpath + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +//EditFile opens the best OS-specific text editor we can find +func EditFile(fileToEdit string) error { + editorPath := getEditorPath() + if editorPath == "" { + return fmt.Errorf("unable to find suitable editor for file %s", fileToEdit) + } + + /* #nosec G204 */ // This function is used by CLI implementations for opening a configuration file + cmd := exec.Command(editorPath, fileToEdit) + cmd.Stdout = os.Stdout + cmd.Stdin = os.Stdin + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func getEditorPath() string { + // we currently only attempt to open TTY-friendly editors here + // we could consider using https://github.com/mattn/go-isatty + // alongside "start" / "open" / "xdg-open" + + //look for a preference in environment variables + for _, eVar := range [...]string{"EDITOR", "VISUAL", "GIT_EDITOR"} { + path := os.Getenv(eVar) + _, err := os.Stat(path) + if len(path) > 0 && err == nil { + return path + } + } + //look for a preference via 'git config' + git, err := exec.LookPath("git") + if err == nil { + /* #nosec G204 */ // The parameter's value is controlled + out, err := exec.Command(git, "config", "core.editor").Output() + if err == nil { + cmd := strings.TrimSpace(string(out)) + _, err := os.Stat(cmd) + if len(cmd) > 0 && err == nil { + return cmd + } + } + } + //heck, just try a bunch of options + for _, exe := range [...]string{"nvim", "vim", "vi", "emacs", "nano", "pico"} { + path, err := exec.LookPath(exe) + if err == nil { + return path + } + } + return "" +} diff --git a/vendor/storj.io/common/fpath/os.go b/vendor/storj.io/common/fpath/os.go new file mode 100644 index 000000000..4898de134 --- /dev/null +++ b/vendor/storj.io/common/fpath/os.go @@ -0,0 +1,133 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package fpath + +import ( + "fmt" + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" + + "github.com/zeebo/errs" +) + +// IsRoot returns whether path is the root directory +func IsRoot(path string) bool { + abs, err := filepath.Abs(path) + if err == nil { + path = abs + } + + return filepath.Dir(path) == path +} + +// ApplicationDir returns best base directory for specific OS +func ApplicationDir(subdir ...string) string { + for i := range subdir { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + subdir[i] = strings.Title(subdir[i]) + } else { + subdir[i] = strings.ToLower(subdir[i]) + } + } + var appdir string + home := os.Getenv("HOME") + + switch runtime.GOOS { + case "windows": + // Windows standards: https://msdn.microsoft.com/en-us/library/windows/apps/hh465094.aspx?f=255&MSPPError=-2147217396 + for _, env := range []string{"AppData", "AppDataLocal", "UserProfile", "Home"} { + val := os.Getenv(env) + if val != "" { + appdir = val + break + } + } + case "darwin": + // Mac standards: https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html + appdir = filepath.Join(home, "Library", "Application Support") + case "linux": + fallthrough + default: + // Linux standards: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html + appdir = os.Getenv("XDG_DATA_HOME") + if appdir == "" && home != "" { + appdir = filepath.Join(home, ".local", "share") + } + } + return filepath.Join(append([]string{appdir}, subdir...)...) 
+} + +// IsValidSetupDir checks if directory is valid for setup configuration +func IsValidSetupDir(name string) (ok bool, err error) { + _, err = os.Stat(name) + if err != nil { + if os.IsNotExist(err) { + return true, err + } + return false, err + } + + /* #nosec G304 */ // The function limits later on paths having a config file + f, err := os.Open(name) + if err != nil { + return false, err + } + defer func() { + err = errs.Combine(err, f.Close()) + }() + + for { + var filenames []string + filenames, err = f.Readdirnames(100) + if err == io.EOF { + // nothing more + return true, nil + } else if err != nil { + // something went wrong + return false, err + } + + for _, filename := range filenames { + if filename == "config.yaml" { + return false, nil + } + } + } +} + +// IsWritable determines if a directory is writeable +func IsWritable(filepath string) (bool, error) { + info, err := os.Stat(filepath) + if err != nil { + return false, err + } + + if !info.IsDir() { + return false, fmt.Errorf("path %s is not a directory", filepath) + } + + // Check if the user bit is enabled in file permission + if info.Mode().Perm()&0200 == 0 { + return false, fmt.Errorf("write permission bit is not set on this file for user") + } + + // Test if user can create file + // There is no OS cross-compatible method for + // determining if a user has write permissions on a folder. + // We can test by attempting to create a file in the folder. + testFile := path.Join(filepath, ".perm") + file, err := os.Create(testFile) // For read access. + if err != nil { + return false, fmt.Errorf("write permission bit is not set on this file for user") + } + + _ = file.Close() + _ = os.Remove(testFile) + + return true, nil +} diff --git a/vendor/storj.io/common/fpath/path.go b/vendor/storj.io/common/fpath/path.go new file mode 100644 index 000000000..b197c185e --- /dev/null +++ b/vendor/storj.io/common/fpath/path.go @@ -0,0 +1,148 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package fpath + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "regexp" + "strings" +) + +// FPath is an OS independent path handling structure. +type FPath struct { + original string // the original URL or local path + local bool // if local path + bucket string // only for Storj URL + path string // only for Storj URL - the path within the bucket, cleaned from duplicated slashes +} + +var parseSchemeRegex = regexp.MustCompile(`^([a-zA-Z][a-zA-Z0-9+.-]*):(.*)$`) + +func parseScheme(o string) (scheme, rest string) { + found := parseSchemeRegex.FindStringSubmatch(o) + + switch len(found) { + case 2: + return strings.ToLower(found[1]), "" + case 3: + return strings.ToLower(found[1]), found[2] + } + + return "", o +} + +var parseBucketRegex = regexp.MustCompile(`^/{1,4}([^/]+)(/.*)?$`) + +func parseBucket(o string) (bucket, rest string) { + found := parseBucketRegex.FindStringSubmatch(o) + + switch len(found) { + case 2: + return found[1], "" + case 3: + return found[1], found[2] + } + + return "", o +} + +// New creates new FPath from the given URL +func New(p string) (FPath, error) { + fp := FPath{original: p} + + // Skip processing further if we can determine this is an absolute + // path to a local file. + if filepath.IsAbs(p) { + fp.local = true + + return fp, nil + } + + // Does the path have a scheme? If not then we treat it as a local + // path. Otherwise we validate that the scheme is a supported one. + scheme, rest := parseScheme(p) + if scheme == "" { + // Forbid the use of an empty scheme. + if strings.HasPrefix(rest, ":") { + return fp, errors.New("malformed URL: missing scheme, use format sj://bucket/") + } + + fp.local = true + + return fp, nil + } + + switch scheme { + case "s3": + case "sj": + default: + return fp, fmt.Errorf("unsupported URL scheme: %s, use format sj://bucket/", scheme) + } + + // The remaining portion of the path must begin with a bucket. 
+ bucket, rest := parseBucket(rest) + if bucket == "" { + return fp, errors.New("no bucket specified, use format sj://bucket/") + } + + fp.bucket = bucket + + // We only want to clean the path if it is non-empty. This is because + // path. Clean will turn an empty path into ".". + rest = strings.TrimLeft(rest, "/") + if rest != "" { + fp.path = path.Clean(rest) + } + + return fp, nil +} + +// Join is appends the given segment to the path +func (p FPath) Join(segment string) FPath { + if p.local { + p.original = filepath.Join(p.original, segment) + return p + } + + p.original += "/" + segment + p.path = path.Join(p.path, segment) + return p +} + +// Base returns the last segment of the path +func (p FPath) Base() string { + if p.local { + return filepath.Base(p.original) + } + if p.path == "" { + return "" + } + return path.Base(p.path) +} + +// Bucket returns the first segment of path +func (p FPath) Bucket() string { + return p.bucket +} + +// Path returns the URL path without the scheme +func (p FPath) Path() string { + if p.local { + return p.original + } + return p.path +} + +// IsLocal returns whether the path refers to local or remote location +func (p FPath) IsLocal() bool { + return p.local +} + +// String returns the entire URL (untouched) +func (p FPath) String() string { + return p.original +} diff --git a/vendor/storj.io/common/fpath/temp_data.go b/vendor/storj.io/common/fpath/temp_data.go new file mode 100644 index 000000000..d796eaa18 --- /dev/null +++ b/vendor/storj.io/common/fpath/temp_data.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// TODO maybe there is better place for this + +package fpath + +import "context" + +// The key type is unexported to prevent collisions with context keys defined in +// other packages. 
+type key int + +// temp is the context key for temp struct +const tempKey key = 0 + +type temp struct { + inmemory bool + directory string +} + +// WithTempData creates context with information how store temporary data, in memory or on disk +func WithTempData(ctx context.Context, directory string, inmemory bool) context.Context { + temp := temp{ + inmemory: inmemory, + directory: directory, + } + return context.WithValue(ctx, tempKey, temp) +} + +// GetTempData returns if temporary data should be stored in memory or on disk +func GetTempData(ctx context.Context) (string, bool, bool) { + tempValue, ok := ctx.Value(tempKey).(temp) + if !ok { + return "", false, false + } + return tempValue.directory, tempValue.inmemory, ok +} diff --git a/vendor/storj.io/common/identity/certificate_authority.go b/vendor/storj.io/common/identity/certificate_authority.go new file mode 100644 index 000000000..cda099864 --- /dev/null +++ b/vendor/storj.io/common/identity/certificate_authority.go @@ -0,0 +1,501 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package identity + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "io" + "io/ioutil" + "log" + "sync" + "sync/atomic" + + "github.com/zeebo/errs" + + "storj.io/common/peertls" + "storj.io/common/peertls/extensions" + "storj.io/common/pkcrypto" + "storj.io/common/storj" +) + +const minimumLoggableDifficulty = 8 + +// PeerCertificateAuthority represents the CA which is used to validate peer identities +type PeerCertificateAuthority struct { + RestChain []*x509.Certificate + // Cert is the x509 certificate of the CA + Cert *x509.Certificate + // The ID is calculated from the CA public key. 
+ ID storj.NodeID +} + +// FullCertificateAuthority represents the CA which is used to author and validate full identities +type FullCertificateAuthority struct { + RestChain []*x509.Certificate + // Cert is the x509 certificate of the CA + Cert *x509.Certificate + // The ID is calculated from the CA public key. + ID storj.NodeID + // Key is the private key of the CA + Key crypto.PrivateKey +} + +// CASetupConfig is for creating a CA +type CASetupConfig struct { + VersionNumber uint `default:"0" help:"which identity version to use (0 is latest)"` + ParentCertPath string `help:"path to the parent authority's certificate chain"` + ParentKeyPath string `help:"path to the parent authority's private key"` + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"` + KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"` + Difficulty uint64 `help:"minimum difficulty for identity generation" default:"36"` + Timeout string `help:"timeout for CA generation; golang duration string (0 no timeout)" default:"5m"` + Overwrite bool `help:"if true, existing CA certs AND keys will overwritten" default:"false" setup:"true"` + Concurrency uint `help:"number of concurrent workers for certificate authority generation" default:"4"` +} + +// NewCAOptions is used to pass parameters to `NewCA` +type NewCAOptions struct { + // VersionNumber is the IDVersion to use for the identity + VersionNumber storj.IDVersionNumber + // Difficulty is the number of trailing zero-bits the nodeID must have + Difficulty uint16 + // Concurrency is the number of go routines used to generate a CA of sufficient difficulty + Concurrency uint + // ParentCert, if provided will be prepended to the certificate chain + ParentCert *x509.Certificate + // ParentKey () + ParentKey crypto.PrivateKey + // Logger is used to log generation status updates + Logger io.Writer +} + +// PeerCAConfig is for locating a CA certificate without a 
private key +type PeerCAConfig struct { + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"` +} + +// FullCAConfig is for locating a CA certificate and it's private key +type FullCAConfig struct { + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"` + KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"` +} + +// NewCA creates a new full identity with the given difficulty +func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority, err error) { + defer mon.Task()(&ctx)(&err) + var ( + highscore = new(uint32) + i = new(uint32) + + mu sync.Mutex + selectedKey crypto.PrivateKey + selectedID storj.NodeID + ) + + if opts.Concurrency < 1 { + opts.Concurrency = 1 + } + + if opts.Logger != nil { + fmt.Fprintf(opts.Logger, "Generating key with a minimum a difficulty of %d...\n", opts.Difficulty) + } + + version, err := storj.GetIDVersion(opts.VersionNumber) + if err != nil { + return nil, err + } + + updateStatus := func() { + if opts.Logger != nil { + count := atomic.LoadUint32(i) + hs := atomic.LoadUint32(highscore) + _, err := fmt.Fprintf(opts.Logger, "\rGenerated %d keys; best difficulty so far: %d", count, hs) + if err != nil { + log.Print(errs.Wrap(err)) + } + } + } + err = GenerateKeys(ctx, minimumLoggableDifficulty, int(opts.Concurrency), version, + func(k crypto.PrivateKey, id storj.NodeID) (done bool, err error) { + if opts.Logger != nil { + if atomic.AddUint32(i, 1)%100 == 0 { + updateStatus() + } + } + + difficulty, err := id.Difficulty() + if err != nil { + return false, err + } + if difficulty >= opts.Difficulty { + mu.Lock() + if selectedKey == nil { + updateStatus() + selectedKey = k + selectedID = id + } + mu.Unlock() + if opts.Logger != nil { + atomic.SwapUint32(highscore, uint32(difficulty)) + updateStatus() + _, err := fmt.Fprintf(opts.Logger, "\nFound a key with difficulty %d!\n", 
difficulty) + if err != nil { + log.Print(errs.Wrap(err)) + } + } + return true, nil + } + for { + hs := atomic.LoadUint32(highscore) + if uint32(difficulty) <= hs { + return false, nil + } + if atomic.CompareAndSwapUint32(highscore, hs, uint32(difficulty)) { + updateStatus() + return false, nil + } + } + }) + if err != nil { + return nil, err + } + + ct, err := peertls.CATemplate() + if err != nil { + return nil, err + } + + if err := extensions.AddExtraExtension(ct, storj.NewVersionExt(version)); err != nil { + return nil, err + } + + var cert *x509.Certificate + if opts.ParentKey == nil { + cert, err = peertls.CreateSelfSignedCertificate(selectedKey, ct) + } else { + var pubKey crypto.PublicKey + pubKey, err = pkcrypto.PublicKeyFromPrivate(selectedKey) + if err != nil { + return nil, err + } + cert, err = peertls.CreateCertificate(pubKey, opts.ParentKey, ct, opts.ParentCert) + } + if err != nil { + return nil, err + } + + ca := &FullCertificateAuthority{ + Cert: cert, + Key: selectedKey, + ID: selectedID, + } + if opts.ParentCert != nil { + ca.RestChain = []*x509.Certificate{opts.ParentCert} + } + return ca, nil +} + +// Status returns the status of the CA cert/key files for the config +func (caS CASetupConfig) Status() (TLSFilesStatus, error) { + return statTLSFiles(caS.CertPath, caS.KeyPath) +} + +// Create generates and saves a CA using the config +func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCertificateAuthority, error) { + var ( + err error + parent *FullCertificateAuthority + ) + if caS.ParentCertPath != "" && caS.ParentKeyPath != "" { + parent, err = FullCAConfig{ + CertPath: caS.ParentCertPath, + KeyPath: caS.ParentKeyPath, + }.Load() + if err != nil { + return nil, err + } + } + + if parent == nil { + parent = &FullCertificateAuthority{} + } + + version, err := storj.GetIDVersion(storj.IDVersionNumber(caS.VersionNumber)) + if err != nil { + return nil, err + } + + ca, err := NewCA(ctx, NewCAOptions{ + VersionNumber: 
version.Number, + Difficulty: uint16(caS.Difficulty), + Concurrency: caS.Concurrency, + ParentCert: parent.Cert, + ParentKey: parent.Key, + Logger: logger, + }) + if err != nil { + return nil, err + } + caC := FullCAConfig{ + CertPath: caS.CertPath, + KeyPath: caS.KeyPath, + } + return ca, caC.Save(ca) +} + +// FullConfig converts a `CASetupConfig` to `FullCAConfig` +func (caS CASetupConfig) FullConfig() FullCAConfig { + return FullCAConfig{ + CertPath: caS.CertPath, + KeyPath: caS.KeyPath, + } +} + +// Load loads a CA from the given configuration +func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) { + p, err := fc.PeerConfig().Load() + if err != nil { + return nil, err + } + + kb, err := ioutil.ReadFile(fc.KeyPath) + if err != nil { + return nil, peertls.ErrNotExist.Wrap(err) + } + k, err := pkcrypto.PrivateKeyFromPEM(kb) + if err != nil { + return nil, err + } + + return &FullCertificateAuthority{ + RestChain: p.RestChain, + Cert: p.Cert, + Key: k, + ID: p.ID, + }, nil +} + +// PeerConfig converts a full ca config to a peer ca config +func (fc FullCAConfig) PeerConfig() PeerCAConfig { + return PeerCAConfig{ + CertPath: fc.CertPath, + } +} + +// Save saves a CA with the given configuration +func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error { + var ( + keyData bytes.Buffer + writeErrs errs.Group + ) + if err := fc.PeerConfig().Save(ca.PeerCA()); err != nil { + writeErrs.Add(err) + return writeErrs.Err() + } + + if fc.KeyPath != "" { + if err := pkcrypto.WritePrivateKeyPEM(&keyData, ca.Key); err != nil { + writeErrs.Add(err) + return writeErrs.Err() + } + if err := writeKeyData(fc.KeyPath, keyData.Bytes()); err != nil { + writeErrs.Add(err) + return writeErrs.Err() + } + } + + return writeErrs.Err() +} + +// SaveBackup saves the certificate of the config wth a timestamped filename +func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error { + return FullCAConfig{ + CertPath: backupPath(fc.CertPath), + KeyPath: 
backupPath(fc.KeyPath), + }.Save(ca) +} + +// Load loads a CA from the given configuration +func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) { + chainPEM, err := ioutil.ReadFile(pc.CertPath) + if err != nil { + return nil, peertls.ErrNotExist.Wrap(err) + } + + chain, err := pkcrypto.CertsFromPEM(chainPEM) + if err != nil { + return nil, errs.New("failed to load identity %#v: %v", + pc.CertPath, err) + } + + // NB: `CAIndex` is in the context of a complete chain (incl. leaf). + // Here we're loading the CA chain (i.e. without leaf). + nodeID, err := NodeIDFromCert(chain[peertls.CAIndex-1]) + if err != nil { + return nil, err + } + + return &PeerCertificateAuthority{ + // NB: `CAIndex` is in the context of a complete chain (incl. leaf). + // Here we're loading the CA chain (i.e. without leaf). + RestChain: chain[peertls.CAIndex:], + Cert: chain[peertls.CAIndex-1], + ID: nodeID, + }, nil +} + +// Save saves a peer CA (cert, no key) with the given configuration +func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error { + var ( + certData bytes.Buffer + writeErrs errs.Group + ) + + chain := []*x509.Certificate{ca.Cert} + chain = append(chain, ca.RestChain...) + + if pc.CertPath != "" { + if err := peertls.WriteChain(&certData, chain...); err != nil { + writeErrs.Add(err) + return writeErrs.Err() + } + if err := writeChainData(pc.CertPath, certData.Bytes()); err != nil { + writeErrs.Add(err) + return writeErrs.Err() + } + } + return nil +} + +// SaveBackup saves the certificate of the config wth a timestamped filename +func (pc PeerCAConfig) SaveBackup(ca *PeerCertificateAuthority) error { + return PeerCAConfig{ + CertPath: backupPath(pc.CertPath), + }.Save(ca) +} + +// NewIdentity generates a new `FullIdentity` based on the CA. The CA +// cert is included in the identity's cert chain and the identity's leaf cert +// is signed by the CA. 
+func (ca *FullCertificateAuthority) NewIdentity(exts ...pkix.Extension) (*FullIdentity, error) { + leafTemplate, err := peertls.LeafTemplate() + if err != nil { + return nil, err + } + // TODO: add test for this! + version, err := ca.Version() + if err != nil { + return nil, err + } + leafKey, err := version.NewPrivateKey() + if err != nil { + return nil, err + } + + if err := extensions.AddExtraExtension(leafTemplate, exts...); err != nil { + return nil, err + } + + pubKey, err := pkcrypto.PublicKeyFromPrivate(leafKey) + if err != nil { + return nil, err + } + + leafCert, err := peertls.CreateCertificate(pubKey, ca.Key, leafTemplate, ca.Cert) + if err != nil { + return nil, err + } + + return &FullIdentity{ + RestChain: ca.RestChain, + CA: ca.Cert, + Leaf: leafCert, + Key: leafKey, + ID: ca.ID, + }, nil + +} + +// Chain returns the CA's certificate chain +func (ca *FullCertificateAuthority) Chain() []*x509.Certificate { + return append([]*x509.Certificate{ca.Cert}, ca.RestChain...) +} + +// RawChain returns the CA's certificate chain as a 2d byte slice +func (ca *FullCertificateAuthority) RawChain() [][]byte { + chain := ca.Chain() + rawChain := make([][]byte, len(chain)) + for i, cert := range chain { + rawChain[i] = cert.Raw + } + return rawChain +} + +// RawRestChain returns the "rest" (excluding `ca.Cert`) of the certificate chain as a 2d byte slice +func (ca *FullCertificateAuthority) RawRestChain() [][]byte { + var chain [][]byte + for _, cert := range ca.RestChain { + chain = append(chain, cert.Raw) + } + return chain +} + +// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority +func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority { + return &PeerCertificateAuthority{ + Cert: ca.Cert, + ID: ca.ID, + RestChain: ca.RestChain, + } +} + +// Sign signs the passed certificate with ca certificate +func (ca *FullCertificateAuthority) Sign(cert *x509.Certificate) (*x509.Certificate, error) { + signedCert, err := 
peertls.CreateCertificate(cert.PublicKey, ca.Key, cert, ca.Cert) + if err != nil { + return nil, errs.Wrap(err) + } + return signedCert, nil +} + +// Version looks up the version based on the certificate's ID version extension. +func (ca *FullCertificateAuthority) Version() (storj.IDVersion, error) { + return storj.IDVersionFromCert(ca.Cert) +} + +// AddExtension adds extensions to certificate authority certificate. Extensions +// are serialized into the certificate's raw bytes and it is re-signed by itself. +func (ca *FullCertificateAuthority) AddExtension(exts ...pkix.Extension) error { + // TODO: how to properly handle this? + if len(ca.RestChain) > 0 { + return errs.New("adding extensions requires parent certificate's private key") + } + + if err := extensions.AddExtraExtension(ca.Cert, exts...); err != nil { + return err + } + + updatedCert, err := peertls.CreateSelfSignedCertificate(ca.Key, ca.Cert) + if err != nil { + return err + } + + ca.Cert = updatedCert + return nil +} + +// Revoke extends the certificate authority certificate with a certificate revocation extension. +func (ca *FullCertificateAuthority) Revoke() error { + ext, err := extensions.NewRevocationExt(ca.Key, ca.Cert) + if err != nil { + return err + } + + return ca.AddExtension(ext) +} diff --git a/vendor/storj.io/common/identity/common.go b/vendor/storj.io/common/identity/common.go new file mode 100644 index 000000000..2a35c6c94 --- /dev/null +++ b/vendor/storj.io/common/identity/common.go @@ -0,0 +1,16 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package identity + +import ( + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" +) + +var ( + mon = monkit.Package() + + // Error is a pkg/identity error + Error = errs.Class("identity error") +) diff --git a/vendor/storj.io/common/identity/doc.go b/vendor/storj.io/common/identity/doc.go new file mode 100644 index 000000000..f692c4709 --- /dev/null +++ b/vendor/storj.io/common/identity/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package identity implements CA and Peer identity management and generation. +package identity diff --git a/vendor/storj.io/common/identity/generate.go b/vendor/storj.io/common/identity/generate.go new file mode 100644 index 000000000..51e30627e --- /dev/null +++ b/vendor/storj.io/common/identity/generate.go @@ -0,0 +1,89 @@ +// Copyright (c) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package identity + +import ( + "context" + "crypto" + + "storj.io/common/pkcrypto" + "storj.io/common/storj" +) + +// GenerateKey generates a private key with a node id with difficulty at least +// minDifficulty. No parallelism is used. +func GenerateKey(ctx context.Context, minDifficulty uint16, version storj.IDVersion) ( + k crypto.PrivateKey, id storj.NodeID, err error) { + defer mon.Task()(&ctx)(&err) + + var d uint16 + for { + err = ctx.Err() + if err != nil { + break + } + k, err = version.NewPrivateKey() + if err != nil { + break + } + + var pubKey crypto.PublicKey + pubKey, err = pkcrypto.PublicKeyFromPrivate(k) + if err != nil { + break + } + + id, err = NodeIDFromKey(pubKey, version) + if err != nil { + break + } + d, err = id.Difficulty() + if err != nil { + break + } + if d >= minDifficulty { + return k, id, nil + } + } + return k, id, storj.ErrNodeID.Wrap(err) +} + +// GenerateCallback indicates that key generation is done when done is true. 
+// if err != nil key generation will stop with that error +type GenerateCallback func(crypto.PrivateKey, storj.NodeID) (done bool, err error) + +// GenerateKeys continues to generate keys until found returns done == false, +// or the ctx is canceled. +func GenerateKeys(ctx context.Context, minDifficulty uint16, concurrency int, version storj.IDVersion, found GenerateCallback) (err error) { + defer mon.Task()(&ctx)(&err) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + errchan := make(chan error, concurrency) + + for i := 0; i < concurrency; i++ { + go func() { + for { + k, id, err := GenerateKey(ctx, minDifficulty, version) + if err != nil { + errchan <- err + return + } + + done, err := found(k, id) + if err != nil { + errchan <- err + return + } + if done { + errchan <- nil + return + } + } + }() + } + + // we only care about the first error. the rest of the errors will be + // context cancellation errors + return <-errchan +} diff --git a/vendor/storj.io/common/identity/identity.go b/vendor/storj.io/common/identity/identity.go new file mode 100644 index 000000000..9aa08101a --- /dev/null +++ b/vendor/storj.io/common/identity/identity.go @@ -0,0 +1,547 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package identity + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/zeebo/errs" + + "storj.io/common/peertls" + "storj.io/common/peertls/extensions" + "storj.io/common/pkcrypto" + "storj.io/common/rpc/rpcpeer" + "storj.io/common/storj" +) + +// PeerIdentity represents another peer on the network. +type PeerIdentity struct { + RestChain []*x509.Certificate + // CA represents the peer's self-signed CA. + CA *x509.Certificate + // Leaf represents the leaf they're currently using. The leaf should be + // signed by the CA. The leaf is what is used for communication. 
+ Leaf *x509.Certificate + // The ID taken from the CA public key. + ID storj.NodeID +} + +// FullIdentity represents you on the network. In addition to a PeerIdentity, +// a FullIdentity also has a Key, which a PeerIdentity doesn't have. +type FullIdentity struct { + RestChain []*x509.Certificate + // CA represents the peer's self-signed CA. The ID is taken from this cert. + CA *x509.Certificate + // Leaf represents the leaf they're currently using. The leaf should be + // signed by the CA. The leaf is what is used for communication. + Leaf *x509.Certificate + // The ID taken from the CA public key. + ID storj.NodeID + // Key is the key this identity uses with the leaf for communication. + Key crypto.PrivateKey +} + +// ManageablePeerIdentity is a `PeerIdentity` and its corresponding `FullCertificateAuthority` +// in a single struct. It is used for making changes to the identity that require CA +// authorization; e.g. adding extensions. +type ManageablePeerIdentity struct { + *PeerIdentity + CA *FullCertificateAuthority +} + +// ManageableFullIdentity is a `FullIdentity` and its corresponding `FullCertificateAuthority` +// in a single struct. It is used for making changes to the identity that require CA +// authorization and the leaf private key; e.g. revoking a leaf cert (private key changes). +type ManageableFullIdentity struct { + *FullIdentity + CA *FullCertificateAuthority +} + +// SetupConfig allows you to run a set of Responsibilities with the given +// identity. You can also just load an Identity from disk. 
+type SetupConfig struct { + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/identity.cert" path:"true"` + KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/identity.key" path:"true"` + Overwrite bool `help:"if true, existing identity certs AND keys will overwritten for" default:"false" setup:"true"` + Version string `help:"semantic version of identity storage format" default:"0"` +} + +// Config allows you to run a set of Responsibilities with the given +// identity. You can also just load an Identity from disk. +type Config struct { + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/identity.cert" user:"true" path:"true"` + KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/identity.key" user:"true" path:"true"` +} + +// PeerConfig allows you to interact with a peer identity (cert, no key) on disk. +type PeerConfig struct { + CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/identity.cert" user:"true" path:"true"` +} + +// FullCertificateAuthorityFromPEM loads a FullIdentity from a certificate chain and +// private key PEM-encoded bytes. +func FullCertificateAuthorityFromPEM(chainPEM, keyPEM []byte) (*FullCertificateAuthority, error) { + peerCA, err := PeerCertificateAuthorityFromPEM(chainPEM) + if err != nil { + return nil, err + } + + // NB: there shouldn't be multiple keys in the key file but if there + // are, this uses the first one + key, err := pkcrypto.PrivateKeyFromPEM(keyPEM) + if err != nil { + return nil, err + } + + return &FullCertificateAuthority{ + RestChain: peerCA.RestChain, + Cert: peerCA.Cert, + Key: key, + ID: peerCA.ID, + }, nil +} + +// PeerCertificateAuthorityFromPEM loads a FullIdentity from a certificate chain and +// private key PEM-encoded bytes. 
+func PeerCertificateAuthorityFromPEM(chainPEM []byte) (*PeerCertificateAuthority, error) { + chain, err := pkcrypto.CertsFromPEM(chainPEM) + if err != nil { + return nil, errs.Wrap(err) + } + // NB: the "leaf" cert in a CA chain is the "CA" cert in an identity chain + nodeID, err := NodeIDFromCert(chain[peertls.LeafIndex]) + if err != nil { + return nil, err + } + + return &PeerCertificateAuthority{ + RestChain: chain[peertls.CAIndex:], + Cert: chain[peertls.LeafIndex], + ID: nodeID, + }, nil +} + +// FullIdentityFromPEM loads a FullIdentity from a certificate chain and +// private key PEM-encoded bytes. +func FullIdentityFromPEM(chainPEM, keyPEM []byte) (*FullIdentity, error) { + peerIdent, err := PeerIdentityFromPEM(chainPEM) + if err != nil { + return nil, err + } + + // NB: there shouldn't be multiple keys in the key file but if there + // are, this uses the first one + key, err := pkcrypto.PrivateKeyFromPEM(keyPEM) + if err != nil { + return nil, err + } + + return &FullIdentity{ + RestChain: peerIdent.RestChain, + CA: peerIdent.CA, + Leaf: peerIdent.Leaf, + Key: key, + ID: peerIdent.ID, + }, nil +} + +// PeerIdentityFromPEM loads a PeerIdentity from a certificate chain and +// private key PEM-encoded bytes. +func PeerIdentityFromPEM(chainPEM []byte) (*PeerIdentity, error) { + chain, err := pkcrypto.CertsFromPEM(chainPEM) + if err != nil { + return nil, errs.Wrap(err) + } + if len(chain) < peertls.CAIndex+1 { + return nil, pkcrypto.ErrChainLength.New("identity chain does not contain a CA certificate") + } + nodeID, err := NodeIDFromCert(chain[peertls.CAIndex]) + if err != nil { + return nil, err + } + + return &PeerIdentity{ + RestChain: chain[peertls.CAIndex+1:], + CA: chain[peertls.CAIndex], + Leaf: chain[peertls.LeafIndex], + ID: nodeID, + }, nil +} + +// PeerIdentityFromChain loads a PeerIdentity from an identity certificate chain. 
+func PeerIdentityFromChain(chain []*x509.Certificate) (*PeerIdentity, error) { + nodeID, err := NodeIDFromCert(chain[peertls.CAIndex]) + if err != nil { + return nil, err + } + + return &PeerIdentity{ + RestChain: chain[peertls.CAIndex+1:], + CA: chain[peertls.CAIndex], + ID: nodeID, + Leaf: chain[peertls.LeafIndex], + }, nil +} + +// PeerIdentityFromPeer loads a PeerIdentity from a peer connection. +func PeerIdentityFromPeer(peer *rpcpeer.Peer) (*PeerIdentity, error) { + chain := peer.State.PeerCertificates + if len(chain)-1 < peertls.CAIndex { + return nil, Error.New("invalid certificate chain") + } + pi, err := PeerIdentityFromChain(chain) + if err != nil { + return nil, err + } + return pi, nil +} + +// PeerIdentityFromContext loads a PeerIdentity from a ctx TLS credentials. +func PeerIdentityFromContext(ctx context.Context) (*PeerIdentity, error) { + peer, err := rpcpeer.FromContext(ctx) + if err != nil { + return nil, err + } + return PeerIdentityFromPeer(peer) +} + +// NodeIDFromCertPath loads a node ID from a certificate file path. +func NodeIDFromCertPath(certPath string) (storj.NodeID, error) { + /* #nosec G304 */ // Subsequent calls ensure that the file is a certificate + certBytes, err := ioutil.ReadFile(certPath) + if err != nil { + return storj.NodeID{}, err + } + return NodeIDFromPEM(certBytes) +} + +// NodeIDFromPEM loads a node ID from certificate bytes. +func NodeIDFromPEM(pemBytes []byte) (storj.NodeID, error) { + chain, err := pkcrypto.CertsFromPEM(pemBytes) + if err != nil { + return storj.NodeID{}, Error.New("invalid identity certificate") + } + if len(chain)-1 < peertls.CAIndex { + return storj.NodeID{}, Error.New("no CA in identity certificate") + } + return NodeIDFromCert(chain[peertls.CAIndex]) +} + +// NodeIDFromCert looks for a version in an ID version extension in the passed +// cert and then calculates a versioned node ID using the certificate public key. 
+// NB: `cert` would typically be an identity's certificate authority certificate. +func NodeIDFromCert(cert *x509.Certificate) (id storj.NodeID, err error) { + version, err := storj.IDVersionFromCert(cert) + if err != nil { + return id, Error.Wrap(err) + } + return NodeIDFromKey(cert.PublicKey, version) +} + +// NodeIDFromKey calculates the node ID for a given public key with the passed version. +func NodeIDFromKey(k crypto.PublicKey, version storj.IDVersion) (storj.NodeID, error) { + idBytes, err := peertls.DoubleSHA256PublicKey(k) + if err != nil { + return storj.NodeID{}, storj.ErrNodeID.Wrap(err) + } + return storj.NewVersionedID(idBytes, version), nil +} + +// NewFullIdentity creates a new ID for nodes with difficulty and concurrency params. +func NewFullIdentity(ctx context.Context, opts NewCAOptions) (*FullIdentity, error) { + ca, err := NewCA(ctx, opts) + if err != nil { + return nil, err + } + identity, err := ca.NewIdentity() + if err != nil { + return nil, err + } + return identity, err +} + +// ToChains takes a number of certificate chains and returns them as a 2d slice of chains of certificates. +func ToChains(chains ...[]*x509.Certificate) [][]*x509.Certificate { + combinedChains := make([][]*x509.Certificate, len(chains)) + copy(combinedChains, chains) + return combinedChains +} + +// NewManageablePeerIdentity returns a manageable identity given a full identity and a full certificate authority. +func NewManageablePeerIdentity(ident *PeerIdentity, ca *FullCertificateAuthority) *ManageablePeerIdentity { + return &ManageablePeerIdentity{ + PeerIdentity: ident, + CA: ca, + } +} + +// NewManageableFullIdentity returns a manageable identity given a full identity and a full certificate authority. 
+func NewManageableFullIdentity(ident *FullIdentity, ca *FullCertificateAuthority) *ManageableFullIdentity { + return &ManageableFullIdentity{ + FullIdentity: ident, + CA: ca, + } +} + +// Status returns the status of the identity cert/key files for the config +func (is SetupConfig) Status() (TLSFilesStatus, error) { + return statTLSFiles(is.CertPath, is.KeyPath) +} + +// Create generates and saves a CA using the config +func (is SetupConfig) Create(ca *FullCertificateAuthority) (*FullIdentity, error) { + fi, err := ca.NewIdentity() + if err != nil { + return nil, err + } + fi.CA = ca.Cert + ic := Config{ + CertPath: is.CertPath, + KeyPath: is.KeyPath, + } + return fi, ic.Save(fi) +} + +// FullConfig converts a `SetupConfig` to `Config` +func (is SetupConfig) FullConfig() Config { + return Config{ + CertPath: is.CertPath, + KeyPath: is.KeyPath, + } +} + +// Load loads a FullIdentity from the config +func (ic Config) Load() (*FullIdentity, error) { + c, err := ioutil.ReadFile(ic.CertPath) + if err != nil { + return nil, peertls.ErrNotExist.Wrap(err) + } + k, err := ioutil.ReadFile(ic.KeyPath) + if err != nil { + return nil, peertls.ErrNotExist.Wrap(err) + } + fi, err := FullIdentityFromPEM(c, k) + if err != nil { + return nil, errs.New("failed to load identity %#v, %#v: %v", + ic.CertPath, ic.KeyPath, err) + } + return fi, nil +} + +// Save saves a FullIdentity according to the config +func (ic Config) Save(fi *FullIdentity) error { + var ( + certData, keyData bytes.Buffer + writeChainErr, writeChainDataErr, writeKeyErr, writeKeyDataErr error + ) + + chain := []*x509.Certificate{fi.Leaf, fi.CA} + chain = append(chain, fi.RestChain...) + + if ic.CertPath != "" { + writeChainErr = peertls.WriteChain(&certData, chain...) 
+ writeChainDataErr = writeChainData(ic.CertPath, certData.Bytes()) + } + + if ic.KeyPath != "" { + writeKeyErr = pkcrypto.WritePrivateKeyPEM(&keyData, fi.Key) + writeKeyDataErr = writeKeyData(ic.KeyPath, keyData.Bytes()) + } + + writeErr := errs.Combine(writeChainErr, writeKeyErr) + if writeErr != nil { + return writeErr + } + + return errs.Combine( + writeChainDataErr, + writeKeyDataErr, + ) +} + +// SaveBackup saves the certificate of the config with a timestamped filename +func (ic Config) SaveBackup(fi *FullIdentity) error { + return Config{ + CertPath: backupPath(ic.CertPath), + KeyPath: backupPath(ic.KeyPath), + }.Save(fi) +} + +// PeerConfig converts a Config to a PeerConfig +func (ic Config) PeerConfig() *PeerConfig { + return &PeerConfig{ + CertPath: ic.CertPath, + } +} + +// Load loads a PeerIdentity from the config +func (ic PeerConfig) Load() (*PeerIdentity, error) { + c, err := ioutil.ReadFile(ic.CertPath) + if err != nil { + return nil, peertls.ErrNotExist.Wrap(err) + } + pi, err := PeerIdentityFromPEM(c) + if err != nil { + return nil, errs.New("failed to load identity %#v: %v", ic.CertPath, err) + } + return pi, nil +} + +// Save saves a PeerIdentity according to the config +func (ic PeerConfig) Save(peerIdent *PeerIdentity) error { + chain := []*x509.Certificate{peerIdent.Leaf, peerIdent.CA} + chain = append(chain, peerIdent.RestChain...) + + if ic.CertPath != "" { + var certData bytes.Buffer + err := peertls.WriteChain(&certData, chain...) + if err != nil { + return err + } + + return writeChainData(ic.CertPath, certData.Bytes()) + } + + return nil +} + +// SaveBackup saves the certificate of the config with a timestamped filename +func (ic PeerConfig) SaveBackup(pi *PeerIdentity) error { + return PeerConfig{ + CertPath: backupPath(ic.CertPath), + }.Save(pi) +} + +// Chain returns the Identity's certificate chain +func (fi *FullIdentity) Chain() []*x509.Certificate { + return append([]*x509.Certificate{fi.Leaf, fi.CA}, fi.RestChain...) 
+} + +// RawChain returns all of the certificate chain as a 2d byte slice +func (fi *FullIdentity) RawChain() [][]byte { + chain := fi.Chain() + rawChain := make([][]byte, len(chain)) + for i, cert := range chain { + rawChain[i] = cert.Raw + } + return rawChain +} + +// RawRestChain returns the rest (excluding leaf and CA) of the certificate chain as a 2d byte slice +func (fi *FullIdentity) RawRestChain() [][]byte { + rawChain := make([][]byte, len(fi.RestChain)) + for _, cert := range fi.RestChain { + rawChain = append(rawChain, cert.Raw) + } + return rawChain +} + +// PeerIdentity converts a FullIdentity into a PeerIdentity +func (fi *FullIdentity) PeerIdentity() *PeerIdentity { + return &PeerIdentity{ + CA: fi.CA, + Leaf: fi.Leaf, + ID: fi.ID, + RestChain: fi.RestChain, + } +} + +// Version looks up the version based on the certificate's ID version extension. +func (fi *FullIdentity) Version() (storj.IDVersion, error) { + return storj.IDVersionFromCert(fi.CA) +} + +// AddExtension adds extensions to the leaf cert of an identity. Extensions +// are serialized into the certificate's raw bytes and is re-signed by it's +// certificate authority. +func (manageableIdent *ManageablePeerIdentity) AddExtension(ext ...pkix.Extension) error { + if err := extensions.AddExtraExtension(manageableIdent.Leaf, ext...); err != nil { + return err + } + + updatedCert, err := peertls.CreateCertificate(manageableIdent.Leaf.PublicKey, manageableIdent.CA.Key, manageableIdent.Leaf, manageableIdent.CA.Cert) + if err != nil { + return err + } + + manageableIdent.Leaf = updatedCert + return nil +} + +// Revoke extends the CA certificate with a certificate revocation extension. 
+func (manageableIdent *ManageableFullIdentity) Revoke() error { + ext, err := extensions.NewRevocationExt(manageableIdent.CA.Key, manageableIdent.Leaf) + if err != nil { + return err + } + + revokingIdent, err := manageableIdent.CA.NewIdentity(ext) + if err != nil { + return err + } + + manageableIdent.Leaf = revokingIdent.Leaf + + return nil +} + +func backupPath(path string) string { + pathExt := filepath.Ext(path) + base := strings.TrimSuffix(path, pathExt) + return fmt.Sprintf( + "%s.%s%s", + base, + strconv.Itoa(int(time.Now().Unix())), + pathExt, + ) +} + +// EncodePeerIdentity encodes the complete identity chain to bytes +func EncodePeerIdentity(pi *PeerIdentity) []byte { + var chain []byte + chain = append(chain, pi.Leaf.Raw...) + chain = append(chain, pi.CA.Raw...) + for _, cert := range pi.RestChain { + chain = append(chain, cert.Raw...) + } + return chain +} + +// DecodePeerIdentity Decodes the bytes into complete identity chain +func DecodePeerIdentity(ctx context.Context, chain []byte) (_ *PeerIdentity, err error) { + defer mon.Task()(&ctx)(&err) + + var certs []*x509.Certificate + for len(chain) > 0 { + var raw asn1.RawValue + var err error + + chain, err = asn1.Unmarshal(chain, &raw) + if err != nil { + return nil, Error.Wrap(err) + } + + cert, err := pkcrypto.CertFromDER(raw.FullBytes) + if err != nil { + return nil, Error.Wrap(err) + } + + certs = append(certs, cert) + } + if len(certs) < 2 { + return nil, Error.New("not enough certificates") + } + return PeerIdentityFromChain(certs) +} diff --git a/vendor/storj.io/common/identity/utils.go b/vendor/storj.io/common/identity/utils.go new file mode 100644 index 000000000..f7a25fb0a --- /dev/null +++ b/vendor/storj.io/common/identity/utils.go @@ -0,0 +1,108 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package identity + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/zeebo/errs" + + "storj.io/common/fpath" +) + +// TLSFilesStatus is the status of keys +type TLSFilesStatus int + +// Four possible outcomes for four files +const ( + NoCertNoKey = TLSFilesStatus(iota) + CertNoKey + NoCertKey + CertKey +) + +var ( + // ErrZeroBytes is returned for zero slice + ErrZeroBytes = errs.New("byte slice was unexpectedly empty") +) + +// writeChainData writes data to path ensuring permissions are appropriate for a cert +func writeChainData(path string, data []byte) error { + err := writeFile(path, 0744, 0644, data) + if err != nil { + return errs.New("unable to write certificate to \"%s\": %v", path, err) + } + return nil +} + +// writeKeyData writes data to path ensuring permissions are appropriate for a cert +func writeKeyData(path string, data []byte) error { + err := writeFile(path, 0700, 0600, data) + if err != nil { + return errs.New("unable to write key to \"%s\": %v", path, err) + } + return nil +} + +// writeFile writes to path, creating directories and files with the necessary permissions +func writeFile(path string, dirmode, filemode os.FileMode, data []byte) error { + if err := os.MkdirAll(filepath.Dir(path), dirmode); err != nil { + return errs.Wrap(err) + } + if writable, err := fpath.IsWritable(filepath.Dir(path)); !writable || err != nil { + return errs.Wrap(errs.New("%s is not a writeable directory: %s\n", path, err)) + } + if err := ioutil.WriteFile(path, data, filemode); err != nil { + return errs.Wrap(err) + } + + return nil +} + +func statTLSFiles(certPath, keyPath string) (status TLSFilesStatus, err error) { + hasKey := true + hasCert := true + + _, err = os.Stat(certPath) + if err != nil { + if os.IsNotExist(err) { + hasCert = false + } else { + return NoCertNoKey, err + } + } + + _, err = os.Stat(keyPath) + if err != nil { + if os.IsNotExist(err) { + hasKey = false + } else { + return NoCertNoKey, err + } + } + switch { + case 
hasCert && hasKey: + return CertKey, nil + case hasCert: + return CertNoKey, nil + case hasKey: + return NoCertKey, nil + } + + return NoCertNoKey, nil +} + +func (t TLSFilesStatus) String() string { + switch t { + case CertKey: + return "certificate and key" + case CertNoKey: + return "certificate" + case NoCertKey: + return "key" + } + return "" +} diff --git a/vendor/storj.io/common/internal/grpchook/hook.go b/vendor/storj.io/common/internal/grpchook/hook.go new file mode 100644 index 000000000..40319eb3b --- /dev/null +++ b/vendor/storj.io/common/internal/grpchook/hook.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package grpchook exists to avoid introducing a dependency to +// grpc unless pb/pbgrpc is imported in other packages. +package grpchook + +import ( + "context" + "crypto/tls" + "errors" + "net" +) + +// ErrNotHooked is returned from funcs when pb/pbgrpc hasn't been imported. +var ErrNotHooked = errors.New("grpc not hooked") + +// HookedErrServerStopped is the grpc.ErrServerStopped when initialized. +var HookedErrServerStopped error + +// IsErrServerStopped returns when err == grpc.ErrServerStopped and +// pb/pbgrpc has been imported. +func IsErrServerStopped(err error) bool { + if HookedErrServerStopped == nil { + return false + } + + return HookedErrServerStopped == err +} + +// HookedInternalFromContext returns grpc peer information from context. +var HookedInternalFromContext func(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error) + +// InternalFromContext returns the peer that was previously associated by NewContext using grpc. 
+func InternalFromContext(ctx context.Context) (addr net.Addr, state tls.ConnectionState, err error) { + if HookedInternalFromContext == nil { + return nil, tls.ConnectionState{}, ErrNotHooked + } + + return HookedInternalFromContext(ctx) +} + +// StatusCode is rpcstatus.Code, however it cannot use it directly without introducing a +// circular dependency. +type StatusCode uint64 + +// HookedErrorWrap is the func to wrap a status code. +var HookedErrorWrap func(code StatusCode, err error) error + +// HookedConvertToStatusCode tries to convert grpc error status to rpcstatus.StatusCode +var HookedConvertToStatusCode func(err error) (StatusCode, bool) diff --git a/vendor/storj.io/common/macaroon/apikey.go b/vendor/storj.io/common/macaroon/apikey.go new file mode 100644 index 000000000..4db01466d --- /dev/null +++ b/vendor/storj.io/common/macaroon/apikey.go @@ -0,0 +1,294 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package macaroon + +import ( + "bytes" + "context" + "time" + + "github.com/btcsuite/btcutil/base58" + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/pb" +) + +var ( + // Error is a general API Key error + Error = errs.Class("api key error") + // ErrFormat means that the structural formatting of the API Key is invalid + ErrFormat = errs.Class("api key format error") + // ErrInvalid means that the API Key is improperly signed + ErrInvalid = errs.Class("api key invalid error") + // ErrUnauthorized means that the API key does not grant the requested permission + ErrUnauthorized = errs.Class("api key unauthorized error") + // ErrRevoked means the API key has been revoked + ErrRevoked = errs.Class("api key revocation error") + + mon = monkit.Package() +) + +// ActionType specifies the operation type being performed that the Macaroon will validate +type ActionType int + +const ( + // not using iota because these values are persisted in macaroons + _ ActionType = 0 + + // ActionRead 
specifies a read operation + ActionRead ActionType = 1 + // ActionWrite specifies a read operation + ActionWrite ActionType = 2 + // ActionList specifies a read operation + ActionList ActionType = 3 + // ActionDelete specifies a read operation + ActionDelete ActionType = 4 + // ActionProjectInfo requests project-level information + ActionProjectInfo ActionType = 5 +) + +// Action specifies the specific operation being performed that the Macaroon will validate +type Action struct { + Op ActionType + Bucket []byte + EncryptedPath []byte + Time time.Time +} + +// APIKey implements a Macaroon-backed Storj-v3 API key. +type APIKey struct { + mac *Macaroon +} + +// ParseAPIKey parses a given api key string and returns an APIKey if the +// APIKey was correctly formatted. It does not validate the key. +func ParseAPIKey(key string) (*APIKey, error) { + data, version, err := base58.CheckDecode(key) + if err != nil || version != 0 { + return nil, ErrFormat.New("invalid api key format") + } + mac, err := ParseMacaroon(data) + if err != nil { + return nil, ErrFormat.Wrap(err) + } + return &APIKey{mac: mac}, nil +} + +// ParseRawAPIKey parses raw api key data and returns an APIKey if the APIKey +// was correctly formatted. It does not validate the key. +func ParseRawAPIKey(data []byte) (*APIKey, error) { + mac, err := ParseMacaroon(data) + if err != nil { + return nil, ErrFormat.Wrap(err) + } + return &APIKey{mac: mac}, nil +} + +// NewAPIKey generates a brand new unrestricted API key given the provided +// server project secret +func NewAPIKey(secret []byte) (*APIKey, error) { + mac, err := NewUnrestricted(secret) + if err != nil { + return nil, Error.Wrap(err) + } + return &APIKey{mac: mac}, nil +} + +// Check makes sure that the key authorizes the provided action given the root +// project secret and any possible revocations, returning an error if the action +// is not authorized. 'revoked' is a list of revoked heads. 
+func (a *APIKey) Check(ctx context.Context, secret []byte, action Action, revoked [][]byte) (err error) { + defer mon.Task()(&ctx)(&err) + if !a.mac.Validate(secret) { + return ErrInvalid.New("macaroon unauthorized") + } + + // a timestamp is always required on an action + if action.Time.IsZero() { + return Error.New("no timestamp provided") + } + + caveats := a.mac.Caveats() + for _, cavbuf := range caveats { + var cav Caveat + err := pb.Unmarshal(cavbuf, &cav) + if err != nil { + return ErrFormat.New("invalid caveat format") + } + if !cav.Allows(action) { + return ErrUnauthorized.New("action disallowed") + } + } + + head := a.mac.Head() + for _, revokedID := range revoked { + if bytes.Equal(revokedID, head) { + return ErrRevoked.New("macaroon head revoked") + } + } + + return nil +} + +// AllowedBuckets stores information about which buckets are +// allowed to be accessed, where `Buckets` stores names of buckets that are +// allowed and `All` is a bool that indicates if all buckets are allowed or not +type AllowedBuckets struct { + All bool + Buckets map[string]struct{} +} + +// GetAllowedBuckets returns a list of all the allowed bucket paths that match the Action operation +func (a *APIKey) GetAllowedBuckets(ctx context.Context, action Action) (allowed AllowedBuckets, err error) { + defer mon.Task()(&ctx)(&err) + + // Every bucket is allowed until we find a caveat that restricts some paths. + allowed.All = true + + // every caveat that includes a list of allowed paths must include the bucket for + // the bucket to be allowed. in other words, the set of allowed buckets is the + // intersection of all of the buckets in the allowed paths. 
+ for _, cavbuf := range a.mac.Caveats() { + var cav Caveat + err := pb.Unmarshal(cavbuf, &cav) + if err != nil { + return AllowedBuckets{}, ErrFormat.New("invalid caveat format: %v", err) + } + if !cav.Allows(action) { + return AllowedBuckets{}, ErrUnauthorized.New("action disallowed") + } + + // If the caveat does not include any allowed paths, then it is not restricting it. + if len(cav.AllowedPaths) == 0 { + continue + } + + // Since we found some path restrictions, it's definitely the case that not every + // bucket is allowed. + allowed.All = false + + caveatBuckets := map[string]struct{}{} + for _, caveatPath := range cav.AllowedPaths { + caveatBuckets[string(caveatPath.Bucket)] = struct{}{} + } + + if allowed.Buckets == nil { + allowed.Buckets = caveatBuckets + } else { + for bucket := range allowed.Buckets { + if _, ok := caveatBuckets[bucket]; !ok { + delete(allowed.Buckets, bucket) + } + } + } + } + + return allowed, err +} + +// Restrict generates a new APIKey with the provided Caveat attached. +func (a *APIKey) Restrict(caveat Caveat) (*APIKey, error) { + buf, err := pb.Marshal(&caveat) + if err != nil { + return nil, Error.Wrap(err) + } + mac, err := a.mac.AddFirstPartyCaveat(buf) + if err != nil { + return nil, Error.Wrap(err) + } + return &APIKey{mac: mac}, nil +} + +// Head returns the identifier for this macaroon's root ancestor. +func (a *APIKey) Head() []byte { + return a.mac.Head() +} + +// Tail returns the identifier for this macaroon only. +func (a *APIKey) Tail() []byte { + return a.mac.Tail() +} + +// Serialize serializes the API Key to a string +func (a *APIKey) Serialize() string { + return base58.CheckEncode(a.mac.Serialize(), 0) +} + +// SerializeRaw serialize the API Key to raw bytes +func (a *APIKey) SerializeRaw() []byte { + return a.mac.Serialize() +} + +// Allows returns true if the provided action is allowed by the caveat. 
+func (c *Caveat) Allows(action Action) bool { + // if the action is after the caveat's "not after" field, then it is invalid + if c.NotAfter != nil && action.Time.After(*c.NotAfter) { + return false + } + // if the caveat's "not before" field is *after* the action, then the action + // is before the "not before" field and it is invalid + if c.NotBefore != nil && c.NotBefore.After(action.Time) { + return false + } + + // we want to always allow reads for bucket metadata, perhaps filtered by the + // buckets in the allowed paths. + if action.Op == ActionRead && len(action.EncryptedPath) == 0 { + if len(c.AllowedPaths) == 0 { + return true + } + if len(action.Bucket) == 0 { + // if no action.bucket name is provided, then this call is checking that + // we can list all buckets. In that case, return true here and we will + // filter out buckets that aren't allowed later with `GetAllowedBuckets()` + return true + } + for _, path := range c.AllowedPaths { + if bytes.Equal(path.Bucket, action.Bucket) { + return true + } + } + return false + } + + switch action.Op { + case ActionRead: + if c.DisallowReads { + return false + } + case ActionWrite: + if c.DisallowWrites { + return false + } + case ActionList: + if c.DisallowLists { + return false + } + case ActionDelete: + if c.DisallowDeletes { + return false + } + case ActionProjectInfo: + // allow + default: + return false + } + + if len(c.AllowedPaths) > 0 && action.Op != ActionProjectInfo { + found := false + for _, path := range c.AllowedPaths { + if bytes.Equal(action.Bucket, path.Bucket) && + bytes.HasPrefix(action.EncryptedPath, path.EncryptedPathPrefix) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} diff --git a/vendor/storj.io/common/macaroon/caveat.go b/vendor/storj.io/common/macaroon/caveat.go new file mode 100644 index 000000000..1d451a997 --- /dev/null +++ b/vendor/storj.io/common/macaroon/caveat.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package macaroon + +import ( + "crypto/rand" +) + +// NewCaveat returns a Caveat with a random generated nonce. +func NewCaveat() (Caveat, error) { + var buf [8]byte + _, err := rand.Read(buf[:]) + return Caveat{Nonce: buf[:]}, err +} diff --git a/vendor/storj.io/common/macaroon/doc.go b/vendor/storj.io/common/macaroon/doc.go new file mode 100644 index 000000000..0aa7ef854 --- /dev/null +++ b/vendor/storj.io/common/macaroon/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package macaroon implements contextual caveats and authorization. +package macaroon diff --git a/vendor/storj.io/common/macaroon/macaroon.go b/vendor/storj.io/common/macaroon/macaroon.go new file mode 100644 index 000000000..973d0d9e4 --- /dev/null +++ b/vendor/storj.io/common/macaroon/macaroon.go @@ -0,0 +1,128 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package macaroon + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "crypto/subtle" +) + +// Macaroon is a struct that determine contextual caveats and authorization +type Macaroon struct { + head []byte + caveats [][]byte + tail []byte +} + +// NewUnrestricted creates Macaroon with random Head and generated Tail +func NewUnrestricted(secret []byte) (*Macaroon, error) { + head, err := NewSecret() + if err != nil { + return nil, err + } + return &Macaroon{ + head: head, + tail: sign(secret, head), + }, nil +} + +func sign(secret []byte, data []byte) []byte { + signer := hmac.New(sha256.New, secret) + _, err := signer.Write(data) + if err != nil { + // Error skipped because sha256 does not return error + panic(err) + } + + return signer.Sum(nil) +} + +// NewSecret generates cryptographically random 32 bytes +func NewSecret() (secret []byte, err error) { + secret = make([]byte, 32) + + _, err = rand.Read(secret) + if err != nil { + return nil, err + } + + return secret, nil +} + +// 
AddFirstPartyCaveat creates signed macaroon with appended caveat +func (m *Macaroon) AddFirstPartyCaveat(c []byte) (macaroon *Macaroon, err error) { + macaroon = m.Copy() + + macaroon.caveats = append(macaroon.caveats, c) + macaroon.tail = sign(macaroon.tail, c) + + return macaroon, nil +} + +// Validate reconstructs with all caveats from the secret and compares tails, +// returning true if the tails match +func (m *Macaroon) Validate(secret []byte) (ok bool) { + tail := sign(secret, m.head) + for _, cav := range m.caveats { + tail = sign(tail, cav) + } + + return subtle.ConstantTimeCompare(tail, m.tail) == 1 +} + +// Tails returns all ancestor tails up to and including the current tail +func (m *Macaroon) Tails(secret []byte) [][]byte { + tails := make([][]byte, 0, len(m.caveats)+1) + tail := sign(secret, m.head) + tails = append(tails, tail) + for _, cav := range m.caveats { + tail = sign(tail, cav) + tails = append(tails, tail) + } + return tails +} + +// Head returns copy of macaroon head +func (m *Macaroon) Head() (head []byte) { + if len(m.head) == 0 { + return nil + } + return append([]byte(nil), m.head...) +} + +// CaveatLen returns the number of caveats this macaroon has +func (m *Macaroon) CaveatLen() int { + return len(m.caveats) +} + +// Caveats returns copy of macaroon caveats +func (m *Macaroon) Caveats() (caveats [][]byte) { + if len(m.caveats) == 0 { + return nil + } + caveats = make([][]byte, 0, len(m.caveats)) + for _, cav := range m.caveats { + caveats = append(caveats, append([]byte(nil), cav...)) + } + return caveats +} + +// Tail returns copy of macaroon tail +func (m *Macaroon) Tail() (tail []byte) { + if len(m.tail) == 0 { + return nil + } + return append([]byte(nil), m.tail...) 
+} + +// Copy return copy of macaroon +func (m *Macaroon) Copy() *Macaroon { + return &Macaroon{ + head: m.Head(), + caveats: m.Caveats(), + tail: m.Tail(), + } +} diff --git a/vendor/storj.io/common/macaroon/serialize.go b/vendor/storj.io/common/macaroon/serialize.go new file mode 100644 index 000000000..8bd15065c --- /dev/null +++ b/vendor/storj.io/common/macaroon/serialize.go @@ -0,0 +1,215 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package macaroon + +import ( + "encoding/binary" + "errors" +) + +type fieldType int + +const ( + fieldEOS fieldType = 0 + fieldLocation fieldType = 1 + fieldIdentifier fieldType = 2 + fieldVerificationID fieldType = 4 + fieldSignature fieldType = 6 +) + +const ( + version byte = 2 +) + +type packet struct { + fieldType fieldType + data []byte +} + +// Serialize converts macaroon to binary format +func (m *Macaroon) Serialize() (data []byte) { + // Start data from version int + data = append(data, version) + + // Serilize Identity + data = serializePacket(data, packet{ + fieldType: fieldIdentifier, + data: m.head, + }) + data = append(data, 0) + + // Serialize caveats + for _, cav := range m.caveats { + data = serializePacket(data, packet{ + fieldType: fieldIdentifier, + data: cav, + }) + data = append(data, 0) + } + + data = append(data, 0) + + // Serialize tail + data = serializePacket(data, packet{ + fieldType: fieldSignature, + data: m.tail, + }) + + return data +} + +// serializePacket converts packet to binary +func serializePacket(data []byte, p packet) []byte { + data = appendVarint(data, int(p.fieldType)) + data = appendVarint(data, len(p.data)) + data = append(data, p.data...) + + return data +} + +func appendVarint(data []byte, x int) []byte { + var buf [binary.MaxVarintLen32]byte + n := binary.PutUvarint(buf[:], uint64(x)) + + return append(data, buf[:n]...) 
+} + +// ParseMacaroon converts binary to macaroon +func ParseMacaroon(data []byte) (_ *Macaroon, err error) { + if len(data) < 2 { + return nil, errors.New("empty macaroon") + } + if data[0] != version { + return nil, errors.New("invalid macaroon version") + } + // skip version + data = data[1:] + // Parse Location + data, section, err := parseSection(data) + if err != nil { + return nil, err + } + if len(section) > 0 && section[0].fieldType == fieldLocation { + section = section[1:] + } + if len(section) != 1 || section[0].fieldType != fieldIdentifier { + return nil, errors.New("invalid macaroon header") + } + + mac := Macaroon{} + mac.head = section[0].data + for { + rest, section, err := parseSection(data) + if err != nil { + return nil, err + } + data = rest + if len(section) == 0 { + break + } + if len(section) > 0 && section[0].fieldType == fieldLocation { + section = section[1:] + } + if len(section) == 0 || section[0].fieldType != fieldIdentifier { + return nil, errors.New("no Identifier in caveat") + } + cav := append([]byte(nil), section[0].data...) + section = section[1:] + if len(section) == 0 { + // First party caveat. 
+ //if cav.Location != "" { + // return nil, errors.New("location not allowed in first party caveat") + //} + mac.caveats = append(mac.caveats, cav) + continue + } + if len(section) != 1 { + return nil, errors.New("extra fields found in caveat") + } + if section[0].fieldType != fieldVerificationID { + return nil, errors.New("invalid field found in caveat") + } + //cav.VerificationId = section[0].data + mac.caveats = append(mac.caveats, cav) + } + _, sig, err := parsePacket(data) + if err != nil { + return nil, err + } + if sig.fieldType != fieldSignature { + return nil, errors.New("unexpected field found instead of signature") + } + if len(sig.data) != 32 { + return nil, errors.New("signature has unexpected length") + } + mac.tail = make([]byte, 32) + copy(mac.tail, sig.data) + //return data, nil + // Parse Identity + // Parse caveats + // Parse tail + return &mac, nil +} + +// parseSection returns data leftover and packet array +func parseSection(data []byte) ([]byte, []packet, error) { + prevFieldType := fieldType(-1) + var packets []packet + for { + if len(data) == 0 { + return nil, nil, errors.New("section extends past end of buffer") + } + rest, p, err := parsePacket(data) + if err != nil { + return nil, nil, err + } + if p.fieldType == fieldEOS { + return rest, packets, nil + } + if p.fieldType <= prevFieldType { + return nil, nil, errors.New("fields out of order") + } + packets = append(packets, p) + prevFieldType = p.fieldType + data = rest + } +} + +// parsePacket returns data leftover and packet +func parsePacket(data []byte) ([]byte, packet, error) { + data, ft, err := parseVarint(data) + if err != nil { + return nil, packet{}, err + } + + p := packet{fieldType: fieldType(ft)} + if p.fieldType == fieldEOS { + return data, p, nil + } + data, packLen, err := parseVarint(data) + if err != nil { + return nil, packet{}, err + } + + if packLen > len(data) { + return nil, packet{}, errors.New("out of bounds") + } + if packLen == 0 { + p.data = nil + + return 
data, p, nil + } + + p.data = data[0:packLen] + + return data[packLen:], p, nil +} + +func parseVarint(data []byte) ([]byte, int, error) { + value, n := binary.Uvarint(data) + if n <= 0 || value > 0x7fffffff { + return nil, 0, errors.New("varint error") + } + return data[n:], int(value), nil +} diff --git a/vendor/storj.io/common/macaroon/types.pb.go b/vendor/storj.io/common/macaroon/types.pb.go new file mode 100644 index 000000000..6a04afaad --- /dev/null +++ b/vendor/storj.io/common/macaroon/types.pb.go @@ -0,0 +1,203 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types.proto + +package macaroon + +import ( + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Caveat struct { + // if any of these three are set, disallow that type of access + DisallowReads bool `protobuf:"varint,1,opt,name=disallow_reads,json=disallowReads,proto3" json:"disallow_reads,omitempty"` + DisallowWrites bool `protobuf:"varint,2,opt,name=disallow_writes,json=disallowWrites,proto3" json:"disallow_writes,omitempty"` + DisallowLists bool `protobuf:"varint,3,opt,name=disallow_lists,json=disallowLists,proto3" json:"disallow_lists,omitempty"` + DisallowDeletes bool `protobuf:"varint,4,opt,name=disallow_deletes,json=disallowDeletes,proto3" json:"disallow_deletes,omitempty"` + AllowedPaths []*Caveat_Path `protobuf:"bytes,10,rep,name=allowed_paths,json=allowedPaths,proto3" json:"allowed_paths,omitempty"` + // if set, the validity time window + NotAfter *time.Time `protobuf:"bytes,20,opt,name=not_after,json=notAfter,proto3,stdtime" json:"not_after,omitempty"` + NotBefore *time.Time `protobuf:"bytes,21,opt,name=not_before,json=notBefore,proto3,stdtime" json:"not_before,omitempty"` + // nonce is set to some random bytes so that you can make arbitrarily + // many restricted macaroons with the same (or no) restrictions. 
+ Nonce []byte `protobuf:"bytes,30,opt,name=nonce,proto3" json:"nonce,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Caveat) Reset() { *m = Caveat{} } +func (m *Caveat) String() string { return proto.CompactTextString(m) } +func (*Caveat) ProtoMessage() {} +func (*Caveat) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0} +} +func (m *Caveat) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Caveat.Unmarshal(m, b) +} +func (m *Caveat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Caveat.Marshal(b, m, deterministic) +} +func (m *Caveat) XXX_Merge(src proto.Message) { + xxx_messageInfo_Caveat.Merge(m, src) +} +func (m *Caveat) XXX_Size() int { + return xxx_messageInfo_Caveat.Size(m) +} +func (m *Caveat) XXX_DiscardUnknown() { + xxx_messageInfo_Caveat.DiscardUnknown(m) +} + +var xxx_messageInfo_Caveat proto.InternalMessageInfo + +func (m *Caveat) GetDisallowReads() bool { + if m != nil { + return m.DisallowReads + } + return false +} + +func (m *Caveat) GetDisallowWrites() bool { + if m != nil { + return m.DisallowWrites + } + return false +} + +func (m *Caveat) GetDisallowLists() bool { + if m != nil { + return m.DisallowLists + } + return false +} + +func (m *Caveat) GetDisallowDeletes() bool { + if m != nil { + return m.DisallowDeletes + } + return false +} + +func (m *Caveat) GetAllowedPaths() []*Caveat_Path { + if m != nil { + return m.AllowedPaths + } + return nil +} + +func (m *Caveat) GetNotAfter() *time.Time { + if m != nil { + return m.NotAfter + } + return nil +} + +func (m *Caveat) GetNotBefore() *time.Time { + if m != nil { + return m.NotBefore + } + return nil +} + +func (m *Caveat) GetNonce() []byte { + if m != nil { + return m.Nonce + } + return nil +} + +// If any entries exist, require all access to happen in at least +// one of them. 
+type Caveat_Path struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPathPrefix []byte `protobuf:"bytes,2,opt,name=encrypted_path_prefix,json=encryptedPathPrefix,proto3" json:"encrypted_path_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Caveat_Path) Reset() { *m = Caveat_Path{} } +func (m *Caveat_Path) String() string { return proto.CompactTextString(m) } +func (*Caveat_Path) ProtoMessage() {} +func (*Caveat_Path) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{0, 0} +} +func (m *Caveat_Path) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Caveat_Path.Unmarshal(m, b) +} +func (m *Caveat_Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Caveat_Path.Marshal(b, m, deterministic) +} +func (m *Caveat_Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Caveat_Path.Merge(m, src) +} +func (m *Caveat_Path) XXX_Size() int { + return xxx_messageInfo_Caveat_Path.Size(m) +} +func (m *Caveat_Path) XXX_DiscardUnknown() { + xxx_messageInfo_Caveat_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Caveat_Path proto.InternalMessageInfo + +func (m *Caveat_Path) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Caveat_Path) GetEncryptedPathPrefix() []byte { + if m != nil { + return m.EncryptedPathPrefix + } + return nil +} + +func init() { + proto.RegisterType((*Caveat)(nil), "macaroon.Caveat") + proto.RegisterType((*Caveat_Path)(nil), "macaroon.Caveat.Path") +} + +func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } + +var fileDescriptor_d938547f84707355 = []byte{ + // 343 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x50, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x25, 0xaf, 0x7d, 0xa5, 0xef, 0x36, 0x7d, 0xef, 0x31, 0xb6, 0x12, 0xb2, 
0xb0, 0x41, 0x10, + 0xe3, 0x66, 0x0a, 0x75, 0x27, 0x88, 0x58, 0x5d, 0xba, 0x28, 0x83, 0xe0, 0x32, 0x4c, 0x92, 0x9b, + 0x34, 0x98, 0x66, 0xc2, 0xcc, 0xd4, 0xda, 0xbf, 0xf0, 0xd3, 0xfc, 0x03, 0x7f, 0x45, 0x66, 0xd2, + 0x04, 0xba, 0x73, 0x79, 0xce, 0x3d, 0xe7, 0xdc, 0x7b, 0x0f, 0x8c, 0xf4, 0xbe, 0x46, 0x45, 0x6b, + 0x29, 0xb4, 0x20, 0xc3, 0x0d, 0x4f, 0xb8, 0x14, 0xa2, 0xf2, 0x21, 0x17, 0xb9, 0x68, 0x58, 0x7f, + 0x96, 0x0b, 0x91, 0x97, 0x38, 0xb7, 0x28, 0xde, 0x66, 0x73, 0x5d, 0x6c, 0x50, 0x69, 0xbe, 0xa9, + 0x1b, 0xc1, 0xf9, 0x67, 0x0f, 0x06, 0x0f, 0xfc, 0x0d, 0xb9, 0x26, 0x17, 0xf0, 0x37, 0x2d, 0x14, + 0x2f, 0x4b, 0xb1, 0x8b, 0x24, 0xf2, 0x54, 0x79, 0x4e, 0xe0, 0x84, 0x43, 0x36, 0x6e, 0x59, 0x66, + 0x48, 0x72, 0x09, 0xff, 0x3a, 0xd9, 0x4e, 0x16, 0x1a, 0x95, 0xf7, 0xcb, 0xea, 0x3a, 0xf7, 0x8b, + 0x65, 0x8f, 0xf2, 0xca, 0x42, 0x69, 0xe5, 0xf5, 0x8e, 0xf3, 0x9e, 0x0c, 0x49, 0xae, 0xe0, 0x7f, + 0x27, 0x4b, 0xb1, 0x44, 0x13, 0xd8, 0xb7, 0xc2, 0x6e, 0xcf, 0x63, 0x43, 0x93, 0x1b, 0x18, 0x5b, + 0x8c, 0x69, 0x54, 0x73, 0xbd, 0x56, 0x1e, 0x04, 0xbd, 0x70, 0xb4, 0x98, 0xd2, 0xf6, 0x77, 0xda, + 0xbc, 0x42, 0x57, 0x5c, 0xaf, 0x99, 0x7b, 0xd0, 0x1a, 0xa0, 0xc8, 0x2d, 0xfc, 0xa9, 0x84, 0x8e, + 0x78, 0xa6, 0x51, 0x7a, 0x93, 0xc0, 0x09, 0x47, 0x0b, 0x9f, 0x36, 0xed, 0xd0, 0xb6, 0x1d, 0xfa, + 0xdc, 0xb6, 0xb3, 0xec, 0x7f, 0x7c, 0xcd, 0x1c, 0x36, 0xac, 0x84, 0xbe, 0x37, 0x0e, 0x72, 0x07, + 0x60, 0xec, 0x31, 0x66, 0x42, 0xa2, 0x37, 0xfd, 0xa1, 0xdf, 0xac, 0x5c, 0x5a, 0x0b, 0x99, 0xc0, + 0xef, 0x4a, 0x54, 0x09, 0x7a, 0x67, 0x81, 0x13, 0xba, 0xac, 0x01, 0x3e, 0x83, 0xbe, 0x39, 0x8f, + 0x9c, 0xc2, 0x20, 0xde, 0x26, 0xaf, 0xa8, 0x6d, 0xe7, 0x2e, 0x3b, 0x20, 0xb2, 0x80, 0x29, 0x56, + 0x89, 0xdc, 0xd7, 0xfa, 0xf0, 0x73, 0x54, 0x4b, 0xcc, 0x8a, 0x77, 0x5b, 0xb9, 0xcb, 0x4e, 0xba, + 0xa1, 0x49, 0x59, 0xd9, 0x51, 0x3c, 0xb0, 0xe7, 0x5c, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xca, + 0x7b, 0x7d, 0xfc, 0x1f, 0x02, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/macaroon/types.proto 
b/vendor/storj.io/common/macaroon/types.proto new file mode 100644 index 000000000..c71567d92 --- /dev/null +++ b/vendor/storj.io/common/macaroon/types.proto @@ -0,0 +1,33 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; + +package macaroon; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +message Caveat { + // if any of these three are set, disallow that type of access + bool disallow_reads = 1; + bool disallow_writes = 2; + bool disallow_lists = 3; + bool disallow_deletes = 4; + + // If any entries exist, require all access to happen in at least + // one of them. + message Path { + bytes bucket = 1; + bytes encrypted_path_prefix = 2; + } + repeated Path allowed_paths = 10; + + // if set, the validity time window + google.protobuf.Timestamp not_after = 20 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp not_before = 21 [(gogoproto.stdtime) = true]; + + // nonce is set to some random bytes so that you can make arbitrarily + // many restricted macaroons with the same (or no) restrictions. + bytes nonce = 30; +} diff --git a/vendor/storj.io/common/memory/doc.go b/vendor/storj.io/common/memory/doc.go new file mode 100644 index 000000000..b7f3e0264 --- /dev/null +++ b/vendor/storj.io/common/memory/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package memory contains byte size types and manipulation. +package memory diff --git a/vendor/storj.io/common/memory/size.go b/vendor/storj.io/common/memory/size.go new file mode 100644 index 000000000..b34ff01e9 --- /dev/null +++ b/vendor/storj.io/common/memory/size.go @@ -0,0 +1,247 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package memory + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +// base 2 and base 10 sizes +const ( + B Size = 1 << (10 * iota) + KiB + MiB + GiB + TiB + PiB + EiB + + KB Size = 1e3 + MB Size = 1e6 + GB Size = 1e9 + TB Size = 1e12 + PB Size = 1e15 + EB Size = 1e18 +) + +// Size implements flag.Value for collecting memory size in bytes +type Size int64 + +// Int returns bytes size as int +func (size Size) Int() int { return int(size) } + +// Int32 returns bytes size as int32 +func (size Size) Int32() int32 { return int32(size) } + +// Int64 returns bytes size as int64 +func (size Size) Int64() int64 { return int64(size) } + +// Float64 returns bytes size as float64 +func (size Size) Float64() float64 { return float64(size) } + +// KiB returns size in kibibytes +func (size Size) KiB() float64 { return size.Float64() / KiB.Float64() } + +// MiB returns size in mebibytes +func (size Size) MiB() float64 { return size.Float64() / MiB.Float64() } + +// GiB returns size in gibibytes +func (size Size) GiB() float64 { return size.Float64() / GiB.Float64() } + +// TiB returns size in tebibytes +func (size Size) TiB() float64 { return size.Float64() / TiB.Float64() } + +// PiB returns size in pebibytes +func (size Size) PiB() float64 { return size.Float64() / PiB.Float64() } + +// EiB returns size in exbibytes +func (size Size) EiB() float64 { return size.Float64() / EiB.Float64() } + +// KB returns size in kilobytes +func (size Size) KB() float64 { return size.Float64() / KB.Float64() } + +// MB returns size in megabytes +func (size Size) MB() float64 { return size.Float64() / MB.Float64() } + +// GB returns size in gigabytes +func (size Size) GB() float64 { return size.Float64() / GB.Float64() } + +// TB returns size in terabytes +func (size Size) TB() float64 { return size.Float64() / TB.Float64() } + +// PB returns size in petabytes +func (size Size) PB() float64 { return size.Float64() / PB.Float64() } + +// EB returns size in exabytes +func (size Size) 
EB() float64 { return size.Float64() / EB.Float64() } + +// String converts size to a string using base-2 prefixes, unless the number +// appears to be in base 10. +func (size Size) String() string { + if countZeros(int64(size), 1000) > countZeros(int64(size), 1024) { + return size.Base10String() + } + return size.Base2String() +} + +// countZeros considers a number num in base base. It counts zeros of that +// number in that base from least significant to most, stopping when a non-zero +// value is hit. +func countZeros(num, base int64) (count int) { + for num != 0 && num%base == 0 { + num /= base + count++ + } + return count +} + +// Base2String converts size to a string using base-2 prefixes +func (size Size) Base2String() string { + if size == 0 { + return "0 B" + } + + switch { + case abs(size) >= EiB*2/3: + return fmt.Sprintf("%.1f EiB", size.EiB()) + case abs(size) >= PiB*2/3: + return fmt.Sprintf("%.1f PiB", size.PiB()) + case abs(size) >= TiB*2/3: + return fmt.Sprintf("%.1f TiB", size.TiB()) + case abs(size) >= GiB*2/3: + return fmt.Sprintf("%.1f GiB", size.GiB()) + case abs(size) >= MiB*2/3: + return fmt.Sprintf("%.1f MiB", size.MiB()) + case abs(size) >= KiB*2/3: + return fmt.Sprintf("%.1f KiB", size.KiB()) + } + + return strconv.FormatInt(size.Int64(), 10) + " B" +} + +// Base10String converts size to a string using base-10 prefixes +func (size Size) Base10String() string { + if size == 0 { + return "0 B" + } + + switch { + case abs(size) >= EB*2/3: + return fmt.Sprintf("%.1f EB", size.EB()) + case abs(size) >= PB*2/3: + return fmt.Sprintf("%.1f PB", size.PB()) + case abs(size) >= TB*2/3: + return fmt.Sprintf("%.1f TB", size.TB()) + case abs(size) >= GB*2/3: + return fmt.Sprintf("%.1f GB", size.GB()) + case abs(size) >= MB*2/3: + return fmt.Sprintf("%.1f MB", size.MB()) + case abs(size) >= KB*2/3: + return fmt.Sprintf("%.1f KB", size.KB()) + } + + return strconv.FormatInt(size.Int64(), 10) + " B" +} + +func abs(size Size) Size { + if size > 0 { + return 
size + } + return -size +} + +func isLetter(b byte) bool { + return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z') +} + +// Set updates value from string +func (size *Size) Set(s string) error { + if s == "" { + return errors.New("empty size") + } + + p := len(s) + for isLetter(s[p-1]) { + p-- + + if p < 0 { + return errors.New("p out of bounds") + } + } + + value, suffix := s[:p], s[p:] + suffix = strings.ToUpper(suffix) + if suffix == "" || suffix[len(suffix)-1] != 'B' { + suffix += "B" + } + + value = strings.TrimSpace(value) + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + + switch suffix { + case "EB": + *size = Size(v * EB.Float64()) + case "EIB": + *size = Size(v * EiB.Float64()) + case "PB": + *size = Size(v * PB.Float64()) + case "PIB": + *size = Size(v * PiB.Float64()) + case "TB": + *size = Size(v * TB.Float64()) + case "TIB": + *size = Size(v * TiB.Float64()) + case "GB": + *size = Size(v * GB.Float64()) + case "GIB": + *size = Size(v * GiB.Float64()) + case "MB": + *size = Size(v * MB.Float64()) + case "MIB": + *size = Size(v * MiB.Float64()) + case "KB": + *size = Size(v * KB.Float64()) + case "KIB": + *size = Size(v * KiB.Float64()) + case "B", "": + *size = Size(v) + default: + return fmt.Errorf("unknown suffix %q", suffix) + } + + return nil +} + +// Type implements pflag.Value +func (Size) Type() string { return "memory.Size" } + +// MarshalText returns size as a string. +func (size Size) MarshalText() (string, error) { + return size.String(), nil +} + +// UnmarshalText parses text as a string. +func (size *Size) UnmarshalText(text []byte) error { + return size.Set(string(text)) +} + +// MarshalJSON returns size as a json string. +func (size Size) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(size.String())), nil +} + +// UnmarshalJSON parses text from a json string. 
+func (size *Size) UnmarshalJSON(text []byte) error { + unquoted, err := strconv.Unquote(string(text)) + if err != nil { + return err + } + return size.Set(unquoted) +} diff --git a/vendor/storj.io/common/memory/sizes.go b/vendor/storj.io/common/memory/sizes.go new file mode 100644 index 000000000..803154aa1 --- /dev/null +++ b/vendor/storj.io/common/memory/sizes.go @@ -0,0 +1,42 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package memory + +import "strings" + +// Sizes implements flag.Value for collecting memory size +type Sizes struct { + Default []Size + Custom []Size +} + +// Sizes returns the loaded values +func (sizes Sizes) Sizes() []Size { + if len(sizes.Custom) > 0 { + return sizes.Custom + } + return sizes.Default +} + +// String converts values to a string +func (sizes Sizes) String() string { + sz := sizes.Sizes() + xs := make([]string, len(sz)) + for i, size := range sz { + xs[i] = size.String() + } + return strings.Join(xs, " ") +} + +// Set adds values from byte values +func (sizes *Sizes) Set(s string) error { + for _, x := range strings.Fields(s) { + var size Size + if err := size.Set(x); err != nil { + return err + } + sizes.Custom = append(sizes.Custom, size) + } + return nil +} diff --git a/vendor/storj.io/common/memory/string.go b/vendor/storj.io/common/memory/string.go new file mode 100644 index 000000000..25ae05a8c --- /dev/null +++ b/vendor/storj.io/common/memory/string.go @@ -0,0 +1,16 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package memory + +// FormatBytes converts number of bytes to appropriately sized string +func FormatBytes(bytes int64) string { + return Size(bytes).String() +} + +// ParseString converts string to number of bytes +func ParseString(s string) (int64, error) { + var size Size + err := size.Set(s) + return size.Int64(), err +} diff --git a/vendor/storj.io/common/netutil/common.go b/vendor/storj.io/common/netutil/common.go new file mode 100644 index 000000000..9b9b147ae --- /dev/null +++ b/vendor/storj.io/common/netutil/common.go @@ -0,0 +1,8 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package netutil + +import "github.com/spacemonkeygo/monkit/v3" + +var mon = monkit.Package() diff --git a/vendor/storj.io/common/netutil/timeout_linux.go b/vendor/storj.io/common/netutil/timeout_linux.go new file mode 100644 index 000000000..381172f3b --- /dev/null +++ b/vendor/storj.io/common/netutil/timeout_linux.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build linux + +package netutil + +import ( + "errors" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// SetUserTimeout sets the TCP_USER_TIMEOUT setting on the provided conn. +func SetUserTimeout(conn *net.TCPConn, timeout time.Duration) error { + // By default from Go, keep alive period + idle are ~15sec. The default + // keep count is 8 according to some kernel docs. That means it should + // fail after ~120 seconds. Unfortunately, keep alive only happens if + // there is no send-q on the socket, and so a slow reader can still cause + // hanging sockets forever. By setting user timeout, we will kill the + // connection if any writes go unacknowledged for the amount of time. + // This should close the keep alive hole. 
+ // + // See https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/ + + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + controlErr := rawConn.Control(func(fd uintptr) { + err = unix.SetsockoptInt(int(fd), unix.SOL_TCP, unix.TCP_USER_TIMEOUT, int(timeout.Milliseconds())) + }) + if controlErr != nil { + return controlErr + } + if ignoreProtocolNotAvailable(err) != nil { + return err + } + return nil +} + +// ignoreProtocolNotAvailable ignores the "protocol not available" error that +// is returned when netutil.SetUserTimeout is called if running on the Windows +// Subsystem for Linux (see Jira issue COM-23). +func ignoreProtocolNotAvailable(err error) error { + var errno syscall.Errno + if errors.As(err, &errno) { + if errno == syscall.ENOPROTOOPT { + return nil + } + } + return err +} diff --git a/vendor/storj.io/common/netutil/timeout_other.go b/vendor/storj.io/common/netutil/timeout_other.go new file mode 100644 index 000000000..99bde7da6 --- /dev/null +++ b/vendor/storj.io/common/netutil/timeout_other.go @@ -0,0 +1,16 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build !linux + +package netutil + +import ( + "net" + "time" +) + +// SetUserTimeout sets the TCP_USER_TIMEOUT setting on the provided conn. +func SetUserTimeout(conn *net.TCPConn, timeout time.Duration) error { + return nil +} diff --git a/vendor/storj.io/common/netutil/tracking.go b/vendor/storj.io/common/netutil/tracking.go new file mode 100644 index 000000000..1fb12cd60 --- /dev/null +++ b/vendor/storj.io/common/netutil/tracking.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package netutil + +import ( + "net" + "runtime" +) + +// closeTrackingConn wraps a net.Conn and keeps track of if it was closed +// or if it was leaked (and closes it if it was leaked.) 
+type closeTrackingConn struct { + net.Conn +} + +// TrackClose wraps the conn and sets a finalizer on the returned value to +// close the conn and monitor that it was leaked. +func TrackClose(conn net.Conn) net.Conn { + tracked := &closeTrackingConn{Conn: conn} + runtime.SetFinalizer(tracked, (*closeTrackingConn).finalize) + return tracked +} + +// Close clears the finalizer and closes the connection. +func (c *closeTrackingConn) Close() error { + runtime.SetFinalizer(c, nil) + mon.Event("connection_closed") + return c.Conn.Close() +} + +// finalize monitors that a connection was leaked and closes the connection. +func (c *closeTrackingConn) finalize() { + mon.Event("connection_leaked") + _ = c.Conn.Close() +} diff --git a/vendor/storj.io/common/paths/doc.go b/vendor/storj.io/common/paths/doc.go new file mode 100644 index 000000000..4ddc20d62 --- /dev/null +++ b/vendor/storj.io/common/paths/doc.go @@ -0,0 +1,6 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package paths implements wrappers for handling encrypted and unencrypted +// paths safely. +package paths diff --git a/vendor/storj.io/common/paths/path.go b/vendor/storj.io/common/paths/path.go new file mode 100644 index 000000000..6d60dcde7 --- /dev/null +++ b/vendor/storj.io/common/paths/path.go @@ -0,0 +1,153 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package paths + +import ( + "strings" +) + +// +// To avoid confusion about when paths are encrypted, unencrypted, empty or +// non existent, we create some wrapper types so that the compiler will complain +// if someone attempts to use one in the wrong context. +// + +// Unencrypted is an opaque type representing an unencrypted path. +type Unencrypted struct { + raw string +} + +// Encrypted is an opaque type representing an encrypted path. 
+type Encrypted struct { + raw string +} + +// +// unencrypted paths +// + +// NewUnencrypted takes a raw unencrypted path and returns it wrapped. +func NewUnencrypted(raw string) Unencrypted { + return Unencrypted{raw: raw} +} + +// Valid returns if the unencrypted path is valid, which is the same as not being empty. +func (path Unencrypted) Valid() bool { + return path.raw != "" +} + +// Raw returns the original raw path for the Unencrypted. +func (path Unencrypted) Raw() string { + return path.raw +} + +// String returns a human readable form of the Unencrypted. +func (path Unencrypted) String() string { + return path.Raw() +} + +// Consume attempts to remove the prefix from the Unencrypted path and +// reports a boolean indicating if it was able to do so. +func (path Unencrypted) Consume(prefix Unencrypted) (Unencrypted, bool) { + if len(path.raw) >= len(prefix.raw) && path.raw[:len(prefix.raw)] == prefix.raw { + return NewUnencrypted(path.raw[len(prefix.raw):]), true + } + return Unencrypted{}, false +} + +// Iterator returns an iterator over the components of the Unencrypted. +func (path Unencrypted) Iterator() Iterator { + return NewIterator(path.raw) +} + +// Less returns true if 'path' should be sorted earlier than 'other' +func (path Unencrypted) Less(other Unencrypted) bool { + return path.raw < other.raw +} + +// +// encrypted path +// + +// NewEncrypted takes a raw encrypted path and returns it wrapped. +func NewEncrypted(raw string) Encrypted { + return Encrypted{raw: raw} +} + +// Valid returns if the encrypted path is valid, which is the same as not being empty. +func (path Encrypted) Valid() bool { + return path.raw != "" +} + +// Raw returns the original path for the Encrypted. +func (path Encrypted) Raw() string { + return path.raw +} + +// String returns a human readable form of the Encrypted. 
+func (path Encrypted) String() string { + return path.Raw() +} + +// Consume attempts to remove the prefix from the Encrypted path and +// reports a boolean indicating if it was able to do so. +func (path Encrypted) Consume(prefix Encrypted) (Encrypted, bool) { + if len(path.raw) >= len(prefix.raw) && path.raw[:len(prefix.raw)] == prefix.raw { + return NewEncrypted(path.raw[len(prefix.raw):]), true + } + return Encrypted{}, false +} + +// Iterator returns an iterator over the components of the Encrypted. +func (path Encrypted) Iterator() Iterator { + return NewIterator(path.raw) +} + +// Less returns true if 'path' should be sorted earlier than 'other' +func (path Encrypted) Less(other Encrypted) bool { + return path.raw < other.raw +} + +// +// path component iteration +// + +// Iterator allows one to efficiently iterate over components of a path. +type Iterator struct { + raw string + consumed int + lastEmpty bool +} + +// NewIterator returns an Iterator for components of the provided raw path. +func NewIterator(raw string) Iterator { + return Iterator{raw: raw, lastEmpty: raw != ""} +} + +// Consumed reports how much of the path has been consumed (if any). +func (pi Iterator) Consumed() string { return pi.raw[:pi.consumed] } + +// Remaining reports how much of the path is remaining. +func (pi Iterator) Remaining() string { return pi.raw[pi.consumed:] } + +// Done reports if the path has been fully consumed. +func (pi Iterator) Done() bool { return len(pi.raw) == pi.consumed && !pi.lastEmpty } + +// Next returns the first component of the path, consuming it. 
+func (pi *Iterator) Next() string { + if pi.Done() { + return "" + } + + rem := pi.Remaining() + index := strings.IndexByte(rem, '/') + if index == -1 { + pi.consumed += len(rem) + pi.lastEmpty = false + return rem + } + pi.consumed += index + 1 + pi.lastEmpty = index == len(rem)-1 + return rem[:index] +} diff --git a/vendor/storj.io/common/pb/alias.go b/vendor/storj.io/common/pb/alias.go new file mode 100644 index 000000000..4a506c3af --- /dev/null +++ b/vendor/storj.io/common/pb/alias.go @@ -0,0 +1,12 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package pb + +import proto "github.com/gogo/protobuf/proto" + +// Unmarshal is an alias for proto.Unmarshal. +func Unmarshal(buf []byte, pb proto.Message) error { return proto.Unmarshal(buf, pb) } + +// Marshal is an alias for proto.Marshal. +func Marshal(pb proto.Message) ([]byte, error) { return proto.Marshal(pb) } diff --git a/vendor/storj.io/common/pb/certificate.pb.go b/vendor/storj.io/common/pb/certificate.pb.go new file mode 100644 index 000000000..0ef043c0c --- /dev/null +++ b/vendor/storj.io/common/pb/certificate.pb.go @@ -0,0 +1,206 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: certificate.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type SigningRequest struct { + AuthToken string `protobuf:"bytes,1,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SigningRequest) Reset() { *m = SigningRequest{} } +func (m *SigningRequest) String() string { return proto.CompactTextString(m) } +func (*SigningRequest) ProtoMessage() {} +func (*SigningRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_c0d34c34dd33be4b, []int{0} +} +func (m *SigningRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SigningRequest.Unmarshal(m, b) +} +func (m *SigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SigningRequest.Marshal(b, m, deterministic) +} +func (m *SigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SigningRequest.Merge(m, src) +} +func (m *SigningRequest) XXX_Size() int { + return xxx_messageInfo_SigningRequest.Size(m) +} +func (m *SigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SigningRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SigningRequest proto.InternalMessageInfo + +func (m *SigningRequest) GetAuthToken() string { + if m != nil { + return m.AuthToken + } + return "" +} + +func (m *SigningRequest) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +type SigningResponse struct { + Chain [][]byte `protobuf:"bytes,1,rep,name=chain,proto3" json:"chain,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SigningResponse) Reset() { *m = SigningResponse{} } +func (m *SigningResponse) String() string { return proto.CompactTextString(m) } +func (*SigningResponse) ProtoMessage() 
{} +func (*SigningResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c0d34c34dd33be4b, []int{1} +} +func (m *SigningResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SigningResponse.Unmarshal(m, b) +} +func (m *SigningResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SigningResponse.Marshal(b, m, deterministic) +} +func (m *SigningResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SigningResponse.Merge(m, src) +} +func (m *SigningResponse) XXX_Size() int { + return xxx_messageInfo_SigningResponse.Size(m) +} +func (m *SigningResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SigningResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SigningResponse proto.InternalMessageInfo + +func (m *SigningResponse) GetChain() [][]byte { + if m != nil { + return m.Chain + } + return nil +} + +func init() { + proto.RegisterType((*SigningRequest)(nil), "node.SigningRequest") + proto.RegisterType((*SigningResponse)(nil), "node.SigningResponse") +} + +func init() { proto.RegisterFile("certificate.proto", fileDescriptor_c0d34c34dd33be4b) } + +var fileDescriptor_c0d34c34dd33be4b = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x8f, 0xc1, 0x4a, 0xc6, 0x30, + 0x10, 0x06, 0xa9, 0xff, 0xaf, 0xd0, 0xa5, 0x28, 0x86, 0x0a, 0x45, 0x14, 0x4a, 0x2f, 0xf6, 0x94, + 0x82, 0x7d, 0x03, 0x7b, 0xf6, 0x12, 0x3d, 0x79, 0x91, 0x34, 0xae, 0x6d, 0x94, 0x64, 0x63, 0xb3, + 0x7d, 0x7f, 0x89, 0x05, 0x8b, 0x1e, 0x77, 0x60, 0x87, 0xf9, 0xe0, 0xd2, 0xe0, 0xc2, 0xf6, 0xdd, + 0x1a, 0xcd, 0x28, 0xc3, 0x42, 0x4c, 0xe2, 0xe8, 0xe9, 0x0d, 0x9b, 0x47, 0x38, 0x7f, 0xb2, 0x93, + 0xb7, 0x7e, 0x52, 0xf8, 0xb5, 0x62, 0x64, 0x71, 0x0b, 0xa0, 0x57, 0x9e, 0x5f, 0x99, 0x3e, 0xd1, + 0x57, 0x59, 0x9d, 0xb5, 0xb9, 0xca, 0x13, 0x79, 0x4e, 0x40, 0xdc, 0x40, 0xce, 0xd6, 0x61, 0x64, + 0xed, 0x42, 0x75, 0x52, 0x67, 0xed, 0x41, 0xed, 0xa0, 0xb9, 0x83, 0x8b, 0x5f, 0x5d, 0x0c, 
0xe4, + 0x23, 0x8a, 0x12, 0x4e, 0xcd, 0xac, 0x6d, 0x52, 0x1d, 0xda, 0x42, 0x6d, 0xc7, 0xfd, 0x00, 0xc5, + 0xb0, 0x27, 0x45, 0xd1, 0xc3, 0x31, 0x3d, 0x8a, 0x52, 0xa6, 0x2c, 0xf9, 0xb7, 0xe9, 0xfa, 0xea, + 0x1f, 0xdd, 0xd4, 0x0f, 0xe5, 0x8b, 0x88, 0x4c, 0xcb, 0x87, 0xb4, 0xd4, 0x19, 0x72, 0x8e, 0x7c, + 0x17, 0xc6, 0xf1, 0xec, 0x67, 0x5f, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xe3, 0xe4, 0xf2, + 0xf4, 0x00, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCCertificatesClient interface { + DRPCConn() drpc.Conn + + Sign(ctx context.Context, in *SigningRequest) (*SigningResponse, error) +} + +type drpcCertificatesClient struct { + cc drpc.Conn +} + +func NewDRPCCertificatesClient(cc drpc.Conn) DRPCCertificatesClient { + return &drpcCertificatesClient{cc} +} + +func (c *drpcCertificatesClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcCertificatesClient) Sign(ctx context.Context, in *SigningRequest) (*SigningResponse, error) { + out := new(SigningResponse) + err := c.cc.Invoke(ctx, "/node.Certificates/Sign", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCCertificatesServer interface { + Sign(context.Context, *SigningRequest) (*SigningResponse, error) +} + +type DRPCCertificatesDescription struct{} + +func (DRPCCertificatesDescription) NumMethods() int { return 1 } + +func (DRPCCertificatesDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/node.Certificates/Sign", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCCertificatesServer). 
+ Sign( + ctx, + in1.(*SigningRequest), + ) + }, DRPCCertificatesServer.Sign, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterCertificates(mux drpc.Mux, impl DRPCCertificatesServer) error { + return mux.Register(impl, DRPCCertificatesDescription{}) +} + +type DRPCCertificates_SignStream interface { + drpc.Stream + SendAndClose(*SigningResponse) error +} + +type drpcCertificatesSignStream struct { + drpc.Stream +} + +func (x *drpcCertificatesSignStream) SendAndClose(m *SigningResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/certificate.proto b/vendor/storj.io/common/pb/certificate.proto new file mode 100644 index 000000000..d753acb7d --- /dev/null +++ b/vendor/storj.io/common/pb/certificate.proto @@ -0,0 +1,20 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package node; + +service Certificates { + rpc Sign(SigningRequest) returns (SigningResponse); +} + +message SigningRequest { + string auth_token = 1; + int64 timestamp = 2; +} + +message SigningResponse { + repeated bytes chain = 1; +} diff --git a/vendor/storj.io/common/pb/contact.pb.go b/vendor/storj.io/common/pb/contact.pb.go new file mode 100644 index 000000000..1a47f1ddf --- /dev/null +++ b/vendor/storj.io/common/pb/contact.pb.go @@ -0,0 +1,483 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: contact.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CheckInRequest struct { + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + Version *NodeVersion `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Capacity *NodeCapacity `protobuf:"bytes,3,opt,name=capacity,proto3" json:"capacity,omitempty"` + Operator *NodeOperator `protobuf:"bytes,4,opt,name=operator,proto3" json:"operator,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckInRequest) Reset() { *m = CheckInRequest{} } +func (m *CheckInRequest) String() string { return proto.CompactTextString(m) } +func (*CheckInRequest) ProtoMessage() {} +func (*CheckInRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{0} +} +func (m *CheckInRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CheckInRequest.Unmarshal(m, b) +} +func (m *CheckInRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CheckInRequest.Marshal(b, m, deterministic) +} +func (m *CheckInRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckInRequest.Merge(m, src) +} +func (m *CheckInRequest) XXX_Size() int { + return xxx_messageInfo_CheckInRequest.Size(m) +} +func (m *CheckInRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckInRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckInRequest proto.InternalMessageInfo + +func (m *CheckInRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m 
*CheckInRequest) GetVersion() *NodeVersion { + if m != nil { + return m.Version + } + return nil +} + +func (m *CheckInRequest) GetCapacity() *NodeCapacity { + if m != nil { + return m.Capacity + } + return nil +} + +func (m *CheckInRequest) GetOperator() *NodeOperator { + if m != nil { + return m.Operator + } + return nil +} + +type CheckInResponse struct { + PingNodeSuccess bool `protobuf:"varint,1,opt,name=ping_node_success,json=pingNodeSuccess,proto3" json:"ping_node_success,omitempty"` + PingErrorMessage string `protobuf:"bytes,2,opt,name=ping_error_message,json=pingErrorMessage,proto3" json:"ping_error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckInResponse) Reset() { *m = CheckInResponse{} } +func (m *CheckInResponse) String() string { return proto.CompactTextString(m) } +func (*CheckInResponse) ProtoMessage() {} +func (*CheckInResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{1} +} +func (m *CheckInResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CheckInResponse.Unmarshal(m, b) +} +func (m *CheckInResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CheckInResponse.Marshal(b, m, deterministic) +} +func (m *CheckInResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckInResponse.Merge(m, src) +} +func (m *CheckInResponse) XXX_Size() int { + return xxx_messageInfo_CheckInResponse.Size(m) +} +func (m *CheckInResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckInResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckInResponse proto.InternalMessageInfo + +func (m *CheckInResponse) GetPingNodeSuccess() bool { + if m != nil { + return m.PingNodeSuccess + } + return false +} + +func (m *CheckInResponse) GetPingErrorMessage() string { + if m != nil { + return m.PingErrorMessage + } + return "" +} + +type GetTimeRequest struct { + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTimeRequest) Reset() { *m = GetTimeRequest{} } +func (m *GetTimeRequest) String() string { return proto.CompactTextString(m) } +func (*GetTimeRequest) ProtoMessage() {} +func (*GetTimeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{2} +} +func (m *GetTimeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTimeRequest.Unmarshal(m, b) +} +func (m *GetTimeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTimeRequest.Marshal(b, m, deterministic) +} +func (m *GetTimeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTimeRequest.Merge(m, src) +} +func (m *GetTimeRequest) XXX_Size() int { + return xxx_messageInfo_GetTimeRequest.Size(m) +} +func (m *GetTimeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTimeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTimeRequest proto.InternalMessageInfo + +type GetTimeResponse struct { + Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTimeResponse) Reset() { *m = GetTimeResponse{} } +func (m *GetTimeResponse) String() string { return proto.CompactTextString(m) } +func (*GetTimeResponse) ProtoMessage() {} +func (*GetTimeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{3} +} +func (m *GetTimeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTimeResponse.Unmarshal(m, b) +} +func (m *GetTimeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTimeResponse.Marshal(b, m, deterministic) +} +func (m *GetTimeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTimeResponse.Merge(m, src) +} +func (m 
*GetTimeResponse) XXX_Size() int { + return xxx_messageInfo_GetTimeResponse.Size(m) +} +func (m *GetTimeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTimeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTimeResponse proto.InternalMessageInfo + +func (m *GetTimeResponse) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +type ContactPingRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContactPingRequest) Reset() { *m = ContactPingRequest{} } +func (m *ContactPingRequest) String() string { return proto.CompactTextString(m) } +func (*ContactPingRequest) ProtoMessage() {} +func (*ContactPingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{4} +} +func (m *ContactPingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ContactPingRequest.Unmarshal(m, b) +} +func (m *ContactPingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ContactPingRequest.Marshal(b, m, deterministic) +} +func (m *ContactPingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContactPingRequest.Merge(m, src) +} +func (m *ContactPingRequest) XXX_Size() int { + return xxx_messageInfo_ContactPingRequest.Size(m) +} +func (m *ContactPingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ContactPingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ContactPingRequest proto.InternalMessageInfo + +type ContactPingResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContactPingResponse) Reset() { *m = ContactPingResponse{} } +func (m *ContactPingResponse) String() string { return proto.CompactTextString(m) } +func (*ContactPingResponse) ProtoMessage() {} +func (*ContactPingResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a5036fff2565fb15, []int{5} 
+} +func (m *ContactPingResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ContactPingResponse.Unmarshal(m, b) +} +func (m *ContactPingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ContactPingResponse.Marshal(b, m, deterministic) +} +func (m *ContactPingResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContactPingResponse.Merge(m, src) +} +func (m *ContactPingResponse) XXX_Size() int { + return xxx_messageInfo_ContactPingResponse.Size(m) +} +func (m *ContactPingResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ContactPingResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ContactPingResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CheckInRequest)(nil), "contact.CheckInRequest") + proto.RegisterType((*CheckInResponse)(nil), "contact.CheckInResponse") + proto.RegisterType((*GetTimeRequest)(nil), "contact.GetTimeRequest") + proto.RegisterType((*GetTimeResponse)(nil), "contact.GetTimeResponse") + proto.RegisterType((*ContactPingRequest)(nil), "contact.ContactPingRequest") + proto.RegisterType((*ContactPingResponse)(nil), "contact.ContactPingResponse") +} + +func init() { proto.RegisterFile("contact.proto", fileDescriptor_a5036fff2565fb15) } + +var fileDescriptor_a5036fff2565fb15 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xc5, 0x50, 0xe1, 0x64, 0x2a, 0x9a, 0x76, 0x29, 0xc2, 0x32, 0x48, 0xa9, 0x7c, 0xaa, 0x00, + 0xad, 0xa5, 0x70, 0xed, 0x29, 0x51, 0x85, 0x38, 0x00, 0xd1, 0x52, 0x38, 0x70, 0x89, 0x9c, 0xf5, + 0x60, 0x4c, 0xf1, 0x8e, 0xd9, 0xdd, 0x20, 0x71, 0xe5, 0x0b, 0xf8, 0x1b, 0x7e, 0x81, 0xaf, 0x80, + 0x5f, 0x41, 0xde, 0x5d, 0x3b, 0x4d, 0xcb, 0xcd, 0xf3, 0xde, 0x9b, 0x37, 0x9e, 0x37, 0x0b, 0xf7, + 0x24, 0x29, 0x5b, 0x48, 0xcb, 0x5b, 0x4d, 0x96, 0x58, 0x1c, 0xca, 0x14, 0x2a, 0xaa, 0xc8, 0x83, + 0xe9, 0xb4, 0x22, 0xaa, 0xbe, 0x60, 0xee, 0xaa, 0xf5, 
0xe6, 0x63, 0x6e, 0xeb, 0x06, 0x8d, 0x2d, + 0x9a, 0x36, 0x08, 0x40, 0x51, 0x89, 0xfe, 0x3b, 0xfb, 0x15, 0xc1, 0xc1, 0xe2, 0x13, 0xca, 0xcb, + 0x97, 0x4a, 0xe0, 0xd7, 0x0d, 0x1a, 0xcb, 0x12, 0x88, 0x8b, 0xb2, 0xd4, 0x68, 0x4c, 0x12, 0x9d, + 0x44, 0xa7, 0x63, 0xd1, 0x97, 0xec, 0x29, 0xc4, 0xdf, 0x50, 0x9b, 0x9a, 0x54, 0x72, 0xfb, 0x24, + 0x3a, 0xdd, 0x9f, 0x1d, 0x71, 0x67, 0xf5, 0x9a, 0x4a, 0x7c, 0xef, 0x09, 0xd1, 0x2b, 0x18, 0x87, + 0x91, 0x2c, 0xda, 0x42, 0xd6, 0xf6, 0x7b, 0x72, 0xc7, 0xa9, 0xd9, 0x56, 0xbd, 0x08, 0x8c, 0x18, + 0x34, 0x9d, 0x9e, 0x5a, 0xd4, 0x85, 0x25, 0x9d, 0xec, 0x5d, 0xd7, 0xbf, 0x09, 0x8c, 0x18, 0x34, + 0xd9, 0x25, 0x4c, 0x86, 0x1f, 0x37, 0x2d, 0x29, 0x83, 0xec, 0x09, 0x1c, 0xb5, 0xb5, 0xaa, 0x56, + 0x5d, 0xdb, 0xca, 0x6c, 0xa4, 0xec, 0x77, 0x18, 0x89, 0x49, 0x47, 0x74, 0x4e, 0x6f, 0x3d, 0xcc, + 0x9e, 0x01, 0x73, 0x5a, 0xd4, 0x9a, 0xf4, 0xaa, 0x41, 0x63, 0x8a, 0x0a, 0xdd, 0x5a, 0x63, 0x71, + 0xd8, 0x31, 0xe7, 0x1d, 0xf1, 0xca, 0xe3, 0xd9, 0x21, 0x1c, 0xbc, 0x40, 0x7b, 0x51, 0x37, 0x18, + 0x52, 0xca, 0xde, 0xc1, 0x64, 0x40, 0xc2, 0xf8, 0x39, 0x8c, 0x87, 0xa8, 0xdd, 0xd8, 0xfd, 0x59, + 0xca, 0xfd, 0x31, 0x78, 0x7f, 0x0c, 0x7e, 0xd1, 0x2b, 0xe6, 0xa3, 0xdf, 0x7f, 0xa6, 0xb7, 0x7e, + 0xfe, 0x9d, 0x46, 0x62, 0xdb, 0x96, 0x1d, 0x03, 0x5b, 0xf8, 0x9b, 0x2e, 0x6b, 0x55, 0xf5, 0xc3, + 0x1e, 0xc0, 0xfd, 0x1d, 0xd4, 0x0f, 0x9c, 0x2d, 0x21, 0x0e, 0x30, 0x3b, 0x87, 0xd1, 0x32, 0x6c, + 0xc8, 0x1e, 0xf1, 0xfe, 0x95, 0xdc, 0xb4, 0x4a, 0x1f, 0xff, 0x9f, 0x0c, 0x8e, 0x3f, 0x22, 0xd8, + 0x73, 0x1e, 0x67, 0x10, 0x87, 0x74, 0xd9, 0xc3, 0x6d, 0xc7, 0xce, 0x43, 0x49, 0x93, 0x9b, 0x44, + 0x48, 0xe2, 0x0c, 0xe2, 0x10, 0xce, 0x95, 0xee, 0xdd, 0x00, 0xaf, 0x74, 0x5f, 0xcb, 0x71, 0x7e, + 0xfc, 0x81, 0x19, 0x4b, 0xfa, 0x33, 0xaf, 0x29, 0x97, 0xd4, 0x34, 0xa4, 0xf2, 0x76, 0xbd, 0xbe, + 0xeb, 0x22, 0x7c, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x76, 0x56, 0xc3, 0x57, 0x03, 0x03, 0x00, + 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCContactClient interface { + DRPCConn() 
drpc.Conn + + PingNode(ctx context.Context, in *ContactPingRequest) (*ContactPingResponse, error) +} + +type drpcContactClient struct { + cc drpc.Conn +} + +func NewDRPCContactClient(cc drpc.Conn) DRPCContactClient { + return &drpcContactClient{cc} +} + +func (c *drpcContactClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcContactClient) PingNode(ctx context.Context, in *ContactPingRequest) (*ContactPingResponse, error) { + out := new(ContactPingResponse) + err := c.cc.Invoke(ctx, "/contact.Contact/PingNode", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCContactServer interface { + PingNode(context.Context, *ContactPingRequest) (*ContactPingResponse, error) +} + +type DRPCContactDescription struct{} + +func (DRPCContactDescription) NumMethods() int { return 1 } + +func (DRPCContactDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/contact.Contact/PingNode", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCContactServer). 
+ PingNode( + ctx, + in1.(*ContactPingRequest), + ) + }, DRPCContactServer.PingNode, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterContact(mux drpc.Mux, impl DRPCContactServer) error { + return mux.Register(impl, DRPCContactDescription{}) +} + +type DRPCContact_PingNodeStream interface { + drpc.Stream + SendAndClose(*ContactPingResponse) error +} + +type drpcContactPingNodeStream struct { + drpc.Stream +} + +func (x *drpcContactPingNodeStream) SendAndClose(m *ContactPingResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNodeClient interface { + DRPCConn() drpc.Conn + + CheckIn(ctx context.Context, in *CheckInRequest) (*CheckInResponse, error) + GetTime(ctx context.Context, in *GetTimeRequest) (*GetTimeResponse, error) +} + +type drpcNodeClient struct { + cc drpc.Conn +} + +func NewDRPCNodeClient(cc drpc.Conn) DRPCNodeClient { + return &drpcNodeClient{cc} +} + +func (c *drpcNodeClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcNodeClient) CheckIn(ctx context.Context, in *CheckInRequest) (*CheckInResponse, error) { + out := new(CheckInResponse) + err := c.cc.Invoke(ctx, "/contact.Node/CheckIn", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcNodeClient) GetTime(ctx context.Context, in *GetTimeRequest) (*GetTimeResponse, error) { + out := new(GetTimeResponse) + err := c.cc.Invoke(ctx, "/contact.Node/GetTime", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCNodeServer interface { + CheckIn(context.Context, *CheckInRequest) (*CheckInResponse, error) + GetTime(context.Context, *GetTimeRequest) (*GetTimeResponse, error) +} + +type DRPCNodeDescription struct{} + +func (DRPCNodeDescription) NumMethods() int { return 2 } + +func (DRPCNodeDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/contact.Node/CheckIn", + func(srv interface{}, ctx context.Context, 
in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeServer). + CheckIn( + ctx, + in1.(*CheckInRequest), + ) + }, DRPCNodeServer.CheckIn, true + case 1: + return "/contact.Node/GetTime", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeServer). + GetTime( + ctx, + in1.(*GetTimeRequest), + ) + }, DRPCNodeServer.GetTime, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterNode(mux drpc.Mux, impl DRPCNodeServer) error { + return mux.Register(impl, DRPCNodeDescription{}) +} + +type DRPCNode_CheckInStream interface { + drpc.Stream + SendAndClose(*CheckInResponse) error +} + +type drpcNodeCheckInStream struct { + drpc.Stream +} + +func (x *drpcNodeCheckInStream) SendAndClose(m *CheckInResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNode_GetTimeStream interface { + drpc.Stream + SendAndClose(*GetTimeResponse) error +} + +type drpcNodeGetTimeStream struct { + drpc.Stream +} + +func (x *drpcNodeGetTimeStream) SendAndClose(m *GetTimeResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/contact.proto b/vendor/storj.io/common/pb/contact.proto new file mode 100644 index 000000000..ec322fcda --- /dev/null +++ b/vendor/storj.io/common/pb/contact.proto @@ -0,0 +1,42 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package contact; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "node.proto"; + +service Contact { + rpc PingNode(ContactPingRequest) returns (ContactPingResponse); +} + +service Node { + rpc CheckIn(CheckInRequest) returns (CheckInResponse); + rpc GetTime(GetTimeRequest) returns (GetTimeResponse); +} + +message CheckInRequest { + string address = 1; + node.NodeVersion version = 2; + node.NodeCapacity capacity = 3; + node.NodeOperator operator = 4; +} + +message CheckInResponse { + bool ping_node_success = 1; + string ping_error_message = 2; +} + +message GetTimeRequest {} + +message GetTimeResponse { + google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message ContactPingRequest {} + +message ContactPingResponse {} diff --git a/vendor/storj.io/common/pb/datarepair.pb.go b/vendor/storj.io/common/pb/datarepair.pb.go new file mode 100644 index 000000000..7f31f2f0c --- /dev/null +++ b/vendor/storj.io/common/pb/datarepair.pb.go @@ -0,0 +1,103 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: datarepair.proto + +package pb + +import ( + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// InjuredSegment is the queue item used for the data repair queue +type InjuredSegment struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + LostPieces []int32 `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"` + InsertedTime time.Time `protobuf:"bytes,3,opt,name=inserted_time,json=insertedTime,proto3,stdtime" json:"inserted_time"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InjuredSegment) Reset() { *m = InjuredSegment{} } +func (m *InjuredSegment) String() string { return proto.CompactTextString(m) } +func (*InjuredSegment) ProtoMessage() {} +func (*InjuredSegment) Descriptor() ([]byte, []int) { + return fileDescriptor_b1b08e6fe9398aa6, []int{0} +} +func (m *InjuredSegment) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InjuredSegment.Unmarshal(m, b) +} +func (m *InjuredSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InjuredSegment.Marshal(b, m, deterministic) +} +func (m *InjuredSegment) XXX_Merge(src proto.Message) { + xxx_messageInfo_InjuredSegment.Merge(m, src) +} +func (m *InjuredSegment) XXX_Size() int { + return xxx_messageInfo_InjuredSegment.Size(m) +} +func (m *InjuredSegment) XXX_DiscardUnknown() { + xxx_messageInfo_InjuredSegment.DiscardUnknown(m) +} + +var xxx_messageInfo_InjuredSegment proto.InternalMessageInfo + +func (m *InjuredSegment) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *InjuredSegment) GetLostPieces() []int32 { + if m != nil { + return m.LostPieces + } + return nil +} + +func (m *InjuredSegment) GetInsertedTime() time.Time { + if m != nil { + return m.InsertedTime + } + return time.Time{} +} + +func init() { + proto.RegisterType((*InjuredSegment)(nil), "repair.InjuredSegment") +} + +func init() { 
proto.RegisterFile("datarepair.proto", fileDescriptor_b1b08e6fe9398aa6) } + +var fileDescriptor_b1b08e6fe9398aa6 = []byte{ + // 217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0x3f, 0x4e, 0xc3, 0x30, + 0x14, 0x87, 0x31, 0x85, 0x0a, 0xb9, 0x05, 0x21, 0x8b, 0x21, 0xca, 0x92, 0x88, 0x29, 0x93, 0x2d, + 0xc1, 0x0d, 0xba, 0x75, 0x43, 0x81, 0x89, 0xa5, 0x72, 0x9a, 0x87, 0x71, 0x55, 0xfb, 0x59, 0xf6, + 0xeb, 0x3d, 0x72, 0x2c, 0x4e, 0x01, 0x57, 0x41, 0xb1, 0x95, 0xed, 0xfd, 0xbe, 0xf7, 0xef, 0xe3, + 0x8f, 0xa3, 0x26, 0x1d, 0x21, 0x68, 0x1b, 0x65, 0x88, 0x48, 0x28, 0xd6, 0x25, 0xd5, 0xdc, 0xa0, + 0xc1, 0xc2, 0xea, 0xc6, 0x20, 0x9a, 0x33, 0xa8, 0x9c, 0x86, 0xcb, 0x97, 0x22, 0xeb, 0x20, 0x91, + 0x76, 0xa1, 0x0c, 0x3c, 0x4f, 0x8c, 0x3f, 0xec, 0xfd, 0xe9, 0x12, 0x61, 0x7c, 0x07, 0xe3, 0xc0, + 0x93, 0x10, 0xfc, 0x26, 0x68, 0xfa, 0xae, 0x58, 0xcb, 0xba, 0x6d, 0x9f, 0x6b, 0xd1, 0xf0, 0xcd, + 0x19, 0x13, 0x1d, 0x82, 0x85, 0x23, 0xa4, 0xea, 0xba, 0x5d, 0x75, 0xb7, 0x3d, 0x9f, 0xd1, 0x5b, + 0x26, 0x62, 0xcf, 0xef, 0xad, 0x4f, 0x10, 0x09, 0xc6, 0xc3, 0xfc, 0xa3, 0x5a, 0xb5, 0xac, 0xdb, + 0xbc, 0xd4, 0xb2, 0x08, 0xc8, 0x45, 0x40, 0x7e, 0x2c, 0x02, 0xbb, 0xbb, 0x9f, 0xdf, 0xe6, 0x6a, + 0xfa, 0x6b, 0x58, 0xbf, 0x5d, 0x56, 0xe7, 0xe6, 0xee, 0xe9, 0x53, 0x24, 0xc2, 0x78, 0x92, 0x16, + 0xd5, 0x11, 0x9d, 0x43, 0xaf, 0xc2, 0x30, 0xac, 0xf3, 0x85, 0xd7, 0xff, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x1f, 0x51, 0xa8, 0x4d, 0xf8, 0x00, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/datarepair.proto b/vendor/storj.io/common/pb/datarepair.proto new file mode 100644 index 000000000..b2ddad246 --- /dev/null +++ b/vendor/storj.io/common/pb/datarepair.proto @@ -0,0 +1,16 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +package repair; + +// InjuredSegment is the queue item used for the data repair queue +message InjuredSegment { + bytes path = 1; + repeated int32 lost_pieces = 2; + google.protobuf.Timestamp inserted_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} diff --git a/vendor/storj.io/common/pb/doc.go b/vendor/storj.io/common/pb/doc.go new file mode 100644 index 000000000..9d3a437f4 --- /dev/null +++ b/vendor/storj.io/common/pb/doc.go @@ -0,0 +1,7 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package pb contains protobuf definitions for Storj peers. +package pb + +//go:generate go run gen.go diff --git a/vendor/storj.io/common/pb/encryption.pb.go b/vendor/storj.io/common/pb/encryption.pb.go new file mode 100644 index 000000000..94fa4989e --- /dev/null +++ b/vendor/storj.io/common/pb/encryption.pb.go @@ -0,0 +1,125 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: encryption.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type CipherSuite int32 + +const ( + CipherSuite_ENC_UNSPECIFIED CipherSuite = 0 + CipherSuite_ENC_NULL CipherSuite = 1 + CipherSuite_ENC_AESGCM CipherSuite = 2 + CipherSuite_ENC_SECRETBOX CipherSuite = 3 +) + +var CipherSuite_name = map[int32]string{ + 0: "ENC_UNSPECIFIED", + 1: "ENC_NULL", + 2: "ENC_AESGCM", + 3: "ENC_SECRETBOX", +} + +var CipherSuite_value = map[string]int32{ + "ENC_UNSPECIFIED": 0, + "ENC_NULL": 1, + "ENC_AESGCM": 2, + "ENC_SECRETBOX": 3, +} + +func (x CipherSuite) String() string { + return proto.EnumName(CipherSuite_name, int32(x)) +} + +func (CipherSuite) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8293a649ce9418c6, []int{0} +} + +type EncryptionParameters struct { + CipherSuite CipherSuite `protobuf:"varint,1,opt,name=cipher_suite,json=cipherSuite,proto3,enum=encryption.CipherSuite" json:"cipher_suite,omitempty"` + BlockSize int64 `protobuf:"varint,2,opt,name=block_size,json=blockSize,proto3" json:"block_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionParameters) Reset() { *m = EncryptionParameters{} } +func (m *EncryptionParameters) String() string { return proto.CompactTextString(m) } +func (*EncryptionParameters) ProtoMessage() {} +func (*EncryptionParameters) Descriptor() ([]byte, []int) { + return fileDescriptor_8293a649ce9418c6, []int{0} +} +func (m *EncryptionParameters) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionParameters.Unmarshal(m, b) +} +func (m *EncryptionParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionParameters.Marshal(b, m, deterministic) +} +func (m *EncryptionParameters) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionParameters.Merge(m, src) +} +func (m *EncryptionParameters) XXX_Size() int { + return 
xxx_messageInfo_EncryptionParameters.Size(m) +} +func (m *EncryptionParameters) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionParameters.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionParameters proto.InternalMessageInfo + +func (m *EncryptionParameters) GetCipherSuite() CipherSuite { + if m != nil { + return m.CipherSuite + } + return CipherSuite_ENC_UNSPECIFIED +} + +func (m *EncryptionParameters) GetBlockSize() int64 { + if m != nil { + return m.BlockSize + } + return 0 +} + +func init() { + proto.RegisterEnum("encryption.CipherSuite", CipherSuite_name, CipherSuite_value) + proto.RegisterType((*EncryptionParameters)(nil), "encryption.EncryptionParameters") +} + +func init() { proto.RegisterFile("encryption.proto", fileDescriptor_8293a649ce9418c6) } + +var fileDescriptor_8293a649ce9418c6 = []byte{ + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcd, 0x4b, 0x2e, + 0xaa, 0x2c, 0x28, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, + 0x28, 0x15, 0x72, 0x89, 0xb8, 0xc2, 0x79, 0x01, 0x89, 0x45, 0x89, 0xb9, 0xa9, 0x25, 0xa9, 0x45, + 0xc5, 0x42, 0x56, 0x5c, 0x3c, 0xc9, 0x99, 0x05, 0x19, 0xa9, 0x45, 0xf1, 0xc5, 0xa5, 0x99, 0x25, + 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x7c, 0x46, 0xe2, 0x7a, 0x48, 0x86, 0x39, 0x83, 0xe5, 0x83, + 0x41, 0xd2, 0x41, 0xdc, 0xc9, 0x08, 0x8e, 0x90, 0x2c, 0x17, 0x57, 0x52, 0x4e, 0x7e, 0x72, 0x76, + 0x7c, 0x71, 0x66, 0x55, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x27, 0x58, 0x24, 0x38, + 0xb3, 0x2a, 0x55, 0x2b, 0x98, 0x8b, 0x1b, 0x49, 0xab, 0x90, 0x30, 0x17, 0xbf, 0xab, 0x9f, 0x73, + 0x7c, 0xa8, 0x5f, 0x70, 0x80, 0xab, 0xb3, 0xa7, 0x9b, 0xa7, 0xab, 0x8b, 0x00, 0x83, 0x10, 0x0f, + 0x17, 0x07, 0x48, 0xd0, 0x2f, 0xd4, 0xc7, 0x47, 0x80, 0x51, 0x88, 0x8f, 0x8b, 0x0b, 0xc4, 0x73, + 0x74, 0x0d, 0x76, 0x77, 0xf6, 0x15, 0x60, 0x12, 0x12, 0xe4, 0xe2, 0x05, 0xf1, 0x83, 0x5d, 0x9d, + 0x83, 0x5c, 0x43, 0x9c, 0xfc, 0x23, 0x04, 0x98, 
0x9d, 0x44, 0xa2, 0x84, 0x8a, 0x4b, 0xf2, 0x8b, + 0xb2, 0xf4, 0x32, 0xf3, 0xf5, 0x93, 0xf3, 0x73, 0x73, 0xf3, 0xf3, 0xf4, 0x0b, 0x92, 0x92, 0xd8, + 0xc0, 0x1e, 0x36, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x70, 0xc9, 0x84, 0x04, 0x01, 0x00, + 0x00, +} diff --git a/vendor/storj.io/common/pb/encryption.proto b/vendor/storj.io/common/pb/encryption.proto new file mode 100644 index 000000000..792a28884 --- /dev/null +++ b/vendor/storj.io/common/pb/encryption.proto @@ -0,0 +1,19 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package encryption; + +message EncryptionParameters { + CipherSuite cipher_suite = 1; + int64 block_size = 2; +} + +enum CipherSuite { + ENC_UNSPECIFIED = 0; + ENC_NULL = 1; + ENC_AESGCM = 2; + ENC_SECRETBOX = 3; +} diff --git a/vendor/storj.io/common/pb/encryption_access.pb.go b/vendor/storj.io/common/pb/encryption_access.pb.go new file mode 100644 index 000000000..a3be557b5 --- /dev/null +++ b/vendor/storj.io/common/pb/encryption_access.pb.go @@ -0,0 +1,195 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: encryption_access.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type EncryptionAccess struct { + DefaultKey []byte `protobuf:"bytes,1,opt,name=default_key,json=defaultKey,proto3" json:"default_key,omitempty"` + StoreEntries []*EncryptionAccess_StoreEntry `protobuf:"bytes,2,rep,name=store_entries,json=storeEntries,proto3" json:"store_entries,omitempty"` + DefaultPathCipher CipherSuite `protobuf:"varint,3,opt,name=default_path_cipher,json=defaultPathCipher,proto3,enum=encryption.CipherSuite" json:"default_path_cipher,omitempty"` + DefaultEncryptionParameters *EncryptionParameters `protobuf:"bytes,4,opt,name=default_encryption_parameters,json=defaultEncryptionParameters,proto3" json:"default_encryption_parameters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionAccess) Reset() { *m = EncryptionAccess{} } +func (m *EncryptionAccess) String() string { return proto.CompactTextString(m) } +func (*EncryptionAccess) ProtoMessage() {} +func (*EncryptionAccess) Descriptor() ([]byte, []int) { + return fileDescriptor_464b1a18bff4a17b, []int{0} +} +func (m *EncryptionAccess) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionAccess.Unmarshal(m, b) +} +func (m *EncryptionAccess) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionAccess.Marshal(b, m, deterministic) +} +func (m *EncryptionAccess) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionAccess.Merge(m, src) +} +func (m *EncryptionAccess) XXX_Size() int { + return xxx_messageInfo_EncryptionAccess.Size(m) +} +func (m *EncryptionAccess) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionAccess.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionAccess proto.InternalMessageInfo + +func (m *EncryptionAccess) GetDefaultKey() []byte { + if m != nil { + return m.DefaultKey + } + return nil +} + +func (m *EncryptionAccess) GetStoreEntries() 
[]*EncryptionAccess_StoreEntry { + if m != nil { + return m.StoreEntries + } + return nil +} + +func (m *EncryptionAccess) GetDefaultPathCipher() CipherSuite { + if m != nil { + return m.DefaultPathCipher + } + return CipherSuite_ENC_UNSPECIFIED +} + +func (m *EncryptionAccess) GetDefaultEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.DefaultEncryptionParameters + } + return nil +} + +type EncryptionAccess_StoreEntry struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + UnencryptedPath []byte `protobuf:"bytes,2,opt,name=unencrypted_path,json=unencryptedPath,proto3" json:"unencrypted_path,omitempty"` + EncryptedPath []byte `protobuf:"bytes,3,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Key []byte `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"` + PathCipher CipherSuite `protobuf:"varint,5,opt,name=path_cipher,json=pathCipher,proto3,enum=encryption.CipherSuite" json:"path_cipher,omitempty"` + EncryptionParameters *EncryptionParameters `protobuf:"bytes,6,opt,name=encryption_parameters,json=encryptionParameters,proto3" json:"encryption_parameters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EncryptionAccess_StoreEntry) Reset() { *m = EncryptionAccess_StoreEntry{} } +func (m *EncryptionAccess_StoreEntry) String() string { return proto.CompactTextString(m) } +func (*EncryptionAccess_StoreEntry) ProtoMessage() {} +func (*EncryptionAccess_StoreEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_464b1a18bff4a17b, []int{0, 0} +} +func (m *EncryptionAccess_StoreEntry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EncryptionAccess_StoreEntry.Unmarshal(m, b) +} +func (m *EncryptionAccess_StoreEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EncryptionAccess_StoreEntry.Marshal(b, m, deterministic) +} 
+func (m *EncryptionAccess_StoreEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_EncryptionAccess_StoreEntry.Merge(m, src) +} +func (m *EncryptionAccess_StoreEntry) XXX_Size() int { + return xxx_messageInfo_EncryptionAccess_StoreEntry.Size(m) +} +func (m *EncryptionAccess_StoreEntry) XXX_DiscardUnknown() { + xxx_messageInfo_EncryptionAccess_StoreEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_EncryptionAccess_StoreEntry proto.InternalMessageInfo + +func (m *EncryptionAccess_StoreEntry) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *EncryptionAccess_StoreEntry) GetUnencryptedPath() []byte { + if m != nil { + return m.UnencryptedPath + } + return nil +} + +func (m *EncryptionAccess_StoreEntry) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *EncryptionAccess_StoreEntry) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *EncryptionAccess_StoreEntry) GetPathCipher() CipherSuite { + if m != nil { + return m.PathCipher + } + return CipherSuite_ENC_UNSPECIFIED +} + +func (m *EncryptionAccess_StoreEntry) GetEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.EncryptionParameters + } + return nil +} + +func init() { + proto.RegisterType((*EncryptionAccess)(nil), "encryption_access.EncryptionAccess") + proto.RegisterType((*EncryptionAccess_StoreEntry)(nil), "encryption_access.EncryptionAccess.StoreEntry") +} + +func init() { proto.RegisterFile("encryption_access.proto", fileDescriptor_464b1a18bff4a17b) } + +var fileDescriptor_464b1a18bff4a17b = []byte{ + // 340 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x41, 0x4f, 0xf2, 0x40, + 0x14, 0x4c, 0x29, 0x1f, 0x87, 0x57, 0xe0, 0x2b, 0x2b, 0x4a, 0x83, 0x31, 0x36, 0x26, 0x26, 0xf5, + 0x52, 0x12, 0xbc, 0x78, 0x55, 0x43, 0x3c, 0x78, 0x21, 0x25, 0x5e, 0xbc, 0x34, 0xa5, 0x3c, 0x43, + 0x45, 0x76, 0x37, 0xbb, 0xdb, 0x43, 
0x7f, 0x8a, 0xbf, 0xcb, 0x3f, 0x64, 0xba, 0x2c, 0xb4, 0x02, + 0x31, 0xde, 0xb6, 0x33, 0xb3, 0xb3, 0xf3, 0xe6, 0x15, 0x06, 0x48, 0x53, 0x51, 0x70, 0x95, 0x31, + 0x1a, 0x27, 0x69, 0x8a, 0x52, 0x86, 0x5c, 0x30, 0xc5, 0x48, 0xef, 0x80, 0x18, 0xba, 0x15, 0xb4, + 0x11, 0x5d, 0x7d, 0x35, 0xc1, 0x9d, 0xec, 0xc0, 0x7b, 0x2d, 0x23, 0x97, 0xe0, 0x2c, 0xf0, 0x2d, + 0xc9, 0x3f, 0x54, 0xbc, 0xc2, 0xc2, 0xb3, 0x7c, 0x2b, 0x68, 0x47, 0x60, 0xa0, 0x67, 0x2c, 0xc8, + 0x0c, 0x3a, 0x52, 0x31, 0x81, 0x31, 0x52, 0x25, 0x32, 0x94, 0x5e, 0xc3, 0xb7, 0x03, 0x67, 0x1c, + 0x86, 0x87, 0x59, 0xf6, 0xcd, 0xc3, 0x59, 0x79, 0x71, 0x42, 0x95, 0x28, 0xa2, 0xb6, 0xdc, 0x9e, + 0x33, 0x94, 0xe4, 0x09, 0x4e, 0xb6, 0xaf, 0xf2, 0x44, 0x2d, 0xe3, 0x34, 0xe3, 0x4b, 0x14, 0x9e, + 0xed, 0x5b, 0x41, 0x77, 0x3c, 0xa8, 0x59, 0x87, 0x8f, 0x9a, 0x99, 0xe5, 0x99, 0xc2, 0xa8, 0x67, + 0xee, 0x4c, 0x13, 0xb5, 0xdc, 0xe0, 0x64, 0x01, 0x17, 0x5b, 0xa3, 0x5a, 0x1e, 0x9e, 0x88, 0x64, + 0x8d, 0x0a, 0x85, 0xf4, 0x9a, 0xbe, 0x15, 0x38, 0x63, 0xbf, 0x6e, 0x59, 0xc5, 0x9c, 0xee, 0x74, + 0xd1, 0xb9, 0xb1, 0x39, 0x46, 0x0e, 0x3f, 0x1b, 0x00, 0xd5, 0x2c, 0xe4, 0x0c, 0x5a, 0xf3, 0x3c, + 0x5d, 0xa1, 0x32, 0x75, 0x99, 0x2f, 0x72, 0x03, 0x6e, 0x4e, 0xcd, 0x43, 0xb8, 0xd0, 0x93, 0x79, + 0x0d, 0xad, 0xf8, 0x5f, 0xc3, 0xcb, 0xf4, 0xe4, 0x1a, 0xba, 0x7b, 0x42, 0x5b, 0x0b, 0x3b, 0x3f, + 0x65, 0x2e, 0xd8, 0xe5, 0x56, 0x9a, 0x9a, 0x2b, 0x8f, 0xe4, 0x0e, 0x9c, 0x7a, 0x63, 0xff, 0x7e, + 0x6f, 0x0c, 0x78, 0x55, 0xd5, 0x0b, 0x9c, 0x1e, 0xaf, 0xa8, 0xf5, 0xc7, 0x8a, 0xfa, 0x78, 0x04, + 0x7d, 0xe8, 0xbf, 0x92, 0x72, 0xb5, 0xef, 0x61, 0xc6, 0x46, 0x29, 0x5b, 0xaf, 0x19, 0x1d, 0xf1, + 0xf9, 0xbc, 0xa5, 0x7f, 0xb9, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xa8, 0x25, 0x28, + 0xb2, 0x02, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/encryption_access.proto b/vendor/storj.io/common/pb/encryption_access.proto new file mode 100644 index 000000000..031d861d7 --- /dev/null +++ b/vendor/storj.io/common/pb/encryption_access.proto @@ -0,0 
+1,27 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; + +option go_package = "storj.io/common/pb"; + +package encryption_access; + +import "encryption.proto"; + +message EncryptionAccess { + message StoreEntry { + bytes bucket = 1; + bytes unencrypted_path = 2; + bytes encrypted_path = 3; + bytes key = 4; + + encryption.CipherSuite path_cipher = 5; + encryption.EncryptionParameters encryption_parameters = 6; + } + + bytes default_key = 1; + repeated StoreEntry store_entries = 2; + encryption.CipherSuite default_path_cipher = 3; + encryption.EncryptionParameters default_encryption_parameters = 4; +} diff --git a/vendor/storj.io/common/pb/gogo.proto b/vendor/storj.io/common/pb/gogo.proto new file mode 100644 index 000000000..937487bf8 --- /dev/null +++ b/vendor/storj.io/common/pb/gogo.proto @@ -0,0 +1,143 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional 
bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + optional bool compare = 65013; + +} \ No newline at end 
of file diff --git a/vendor/storj.io/common/pb/gracefulexit.pb.go b/vendor/storj.io/common/pb/gracefulexit.pb.go new file mode 100644 index 000000000..35e598925 --- /dev/null +++ b/vendor/storj.io/common/pb/gracefulexit.pb.go @@ -0,0 +1,1406 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gracefulexit.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TransferFailed_Error int32 + +const ( + TransferFailed_NOT_FOUND TransferFailed_Error = 0 + TransferFailed_STORAGE_NODE_UNAVAILABLE TransferFailed_Error = 1 + TransferFailed_HASH_VERIFICATION TransferFailed_Error = 2 + TransferFailed_UNKNOWN TransferFailed_Error = 10 +) + +var TransferFailed_Error_name = map[int32]string{ + 0: "NOT_FOUND", + 1: "STORAGE_NODE_UNAVAILABLE", + 2: "HASH_VERIFICATION", + 10: "UNKNOWN", +} + +var TransferFailed_Error_value = map[string]int32{ + "NOT_FOUND": 0, + "STORAGE_NODE_UNAVAILABLE": 1, + "HASH_VERIFICATION": 2, + "UNKNOWN": 10, +} + +func (x TransferFailed_Error) String() string { + return proto.EnumName(TransferFailed_Error_name, int32(x)) +} + +func (TransferFailed_Error) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{8, 0} +} + +type ExitFailed_Reason int32 + +const ( + ExitFailed_VERIFICATION_FAILED ExitFailed_Reason = 0 + ExitFailed_INACTIVE_TIMEFRAME_EXCEEDED ExitFailed_Reason = 1 + 
ExitFailed_OVERALL_FAILURE_PERCENTAGE_EXCEEDED ExitFailed_Reason = 2 +) + +var ExitFailed_Reason_name = map[int32]string{ + 0: "VERIFICATION_FAILED", + 1: "INACTIVE_TIMEFRAME_EXCEEDED", + 2: "OVERALL_FAILURE_PERCENTAGE_EXCEEDED", +} + +var ExitFailed_Reason_value = map[string]int32{ + "VERIFICATION_FAILED": 0, + "INACTIVE_TIMEFRAME_EXCEEDED": 1, + "OVERALL_FAILURE_PERCENTAGE_EXCEEDED": 2, +} + +func (x ExitFailed_Reason) String() string { + return proto.EnumName(ExitFailed_Reason_name, int32(x)) +} + +func (ExitFailed_Reason) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{14, 0} +} + +type GetNonExitingSatellitesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNonExitingSatellitesRequest) Reset() { *m = GetNonExitingSatellitesRequest{} } +func (m *GetNonExitingSatellitesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNonExitingSatellitesRequest) ProtoMessage() {} +func (*GetNonExitingSatellitesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{0} +} +func (m *GetNonExitingSatellitesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNonExitingSatellitesRequest.Unmarshal(m, b) +} +func (m *GetNonExitingSatellitesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNonExitingSatellitesRequest.Marshal(b, m, deterministic) +} +func (m *GetNonExitingSatellitesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNonExitingSatellitesRequest.Merge(m, src) +} +func (m *GetNonExitingSatellitesRequest) XXX_Size() int { + return xxx_messageInfo_GetNonExitingSatellitesRequest.Size(m) +} +func (m *GetNonExitingSatellitesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNonExitingSatellitesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNonExitingSatellitesRequest proto.InternalMessageInfo + +type 
InitiateGracefulExitRequest struct { + NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitiateGracefulExitRequest) Reset() { *m = InitiateGracefulExitRequest{} } +func (m *InitiateGracefulExitRequest) String() string { return proto.CompactTextString(m) } +func (*InitiateGracefulExitRequest) ProtoMessage() {} +func (*InitiateGracefulExitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{1} +} +func (m *InitiateGracefulExitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitiateGracefulExitRequest.Unmarshal(m, b) +} +func (m *InitiateGracefulExitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitiateGracefulExitRequest.Marshal(b, m, deterministic) +} +func (m *InitiateGracefulExitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitiateGracefulExitRequest.Merge(m, src) +} +func (m *InitiateGracefulExitRequest) XXX_Size() int { + return xxx_messageInfo_InitiateGracefulExitRequest.Size(m) +} +func (m *InitiateGracefulExitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitiateGracefulExitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitiateGracefulExitRequest proto.InternalMessageInfo + +// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit +type NonExitingSatellite struct { + NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + DomainName string `protobuf:"bytes,2,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"` + SpaceUsed float64 `protobuf:"fixed64,3,opt,name=space_used,json=spaceUsed,proto3" json:"space_used,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*NonExitingSatellite) Reset() { *m = NonExitingSatellite{} } +func (m *NonExitingSatellite) String() string { return proto.CompactTextString(m) } +func (*NonExitingSatellite) ProtoMessage() {} +func (*NonExitingSatellite) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{2} +} +func (m *NonExitingSatellite) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonExitingSatellite.Unmarshal(m, b) +} +func (m *NonExitingSatellite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonExitingSatellite.Marshal(b, m, deterministic) +} +func (m *NonExitingSatellite) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonExitingSatellite.Merge(m, src) +} +func (m *NonExitingSatellite) XXX_Size() int { + return xxx_messageInfo_NonExitingSatellite.Size(m) +} +func (m *NonExitingSatellite) XXX_DiscardUnknown() { + xxx_messageInfo_NonExitingSatellite.DiscardUnknown(m) +} + +var xxx_messageInfo_NonExitingSatellite proto.InternalMessageInfo + +func (m *NonExitingSatellite) GetDomainName() string { + if m != nil { + return m.DomainName + } + return "" +} + +func (m *NonExitingSatellite) GetSpaceUsed() float64 { + if m != nil { + return m.SpaceUsed + } + return 0 +} + +type GetNonExitingSatellitesResponse struct { + Satellites []*NonExitingSatellite `protobuf:"bytes,1,rep,name=satellites,proto3" json:"satellites,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNonExitingSatellitesResponse) Reset() { *m = GetNonExitingSatellitesResponse{} } +func (m *GetNonExitingSatellitesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNonExitingSatellitesResponse) ProtoMessage() {} +func (*GetNonExitingSatellitesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{3} +} +func (m *GetNonExitingSatellitesResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_GetNonExitingSatellitesResponse.Unmarshal(m, b) +} +func (m *GetNonExitingSatellitesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNonExitingSatellitesResponse.Marshal(b, m, deterministic) +} +func (m *GetNonExitingSatellitesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNonExitingSatellitesResponse.Merge(m, src) +} +func (m *GetNonExitingSatellitesResponse) XXX_Size() int { + return xxx_messageInfo_GetNonExitingSatellitesResponse.Size(m) +} +func (m *GetNonExitingSatellitesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNonExitingSatellitesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNonExitingSatellitesResponse proto.InternalMessageInfo + +func (m *GetNonExitingSatellitesResponse) GetSatellites() []*NonExitingSatellite { + if m != nil { + return m.Satellites + } + return nil +} + +type GetExitProgressRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetExitProgressRequest) Reset() { *m = GetExitProgressRequest{} } +func (m *GetExitProgressRequest) String() string { return proto.CompactTextString(m) } +func (*GetExitProgressRequest) ProtoMessage() {} +func (*GetExitProgressRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{4} +} +func (m *GetExitProgressRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetExitProgressRequest.Unmarshal(m, b) +} +func (m *GetExitProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetExitProgressRequest.Marshal(b, m, deterministic) +} +func (m *GetExitProgressRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetExitProgressRequest.Merge(m, src) +} +func (m *GetExitProgressRequest) XXX_Size() int { + return xxx_messageInfo_GetExitProgressRequest.Size(m) +} +func (m *GetExitProgressRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetExitProgressRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetExitProgressRequest proto.InternalMessageInfo + +type GetExitProgressResponse struct { + Progress []*ExitProgress `protobuf:"bytes,1,rep,name=progress,proto3" json:"progress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetExitProgressResponse) Reset() { *m = GetExitProgressResponse{} } +func (m *GetExitProgressResponse) String() string { return proto.CompactTextString(m) } +func (*GetExitProgressResponse) ProtoMessage() {} +func (*GetExitProgressResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{5} +} +func (m *GetExitProgressResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetExitProgressResponse.Unmarshal(m, b) +} +func (m *GetExitProgressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetExitProgressResponse.Marshal(b, m, deterministic) +} +func (m *GetExitProgressResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetExitProgressResponse.Merge(m, src) +} +func (m *GetExitProgressResponse) XXX_Size() int { + return xxx_messageInfo_GetExitProgressResponse.Size(m) +} +func (m *GetExitProgressResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetExitProgressResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetExitProgressResponse proto.InternalMessageInfo + +func (m *GetExitProgressResponse) GetProgress() []*ExitProgress { + if m != nil { + return m.Progress + } + return nil +} + +type ExitProgress struct { + DomainName string `protobuf:"bytes,1,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"` + NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + PercentComplete float32 `protobuf:"fixed32,3,opt,name=percent_complete,json=percentComplete,proto3" json:"percent_complete,omitempty"` + Successful bool 
`protobuf:"varint,4,opt,name=successful,proto3" json:"successful,omitempty"` + CompletionReceipt []byte `protobuf:"bytes,5,opt,name=completion_receipt,json=completionReceipt,proto3" json:"completion_receipt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExitProgress) Reset() { *m = ExitProgress{} } +func (m *ExitProgress) String() string { return proto.CompactTextString(m) } +func (*ExitProgress) ProtoMessage() {} +func (*ExitProgress) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{6} +} +func (m *ExitProgress) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExitProgress.Unmarshal(m, b) +} +func (m *ExitProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExitProgress.Marshal(b, m, deterministic) +} +func (m *ExitProgress) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExitProgress.Merge(m, src) +} +func (m *ExitProgress) XXX_Size() int { + return xxx_messageInfo_ExitProgress.Size(m) +} +func (m *ExitProgress) XXX_DiscardUnknown() { + xxx_messageInfo_ExitProgress.DiscardUnknown(m) +} + +var xxx_messageInfo_ExitProgress proto.InternalMessageInfo + +func (m *ExitProgress) GetDomainName() string { + if m != nil { + return m.DomainName + } + return "" +} + +func (m *ExitProgress) GetPercentComplete() float32 { + if m != nil { + return m.PercentComplete + } + return 0 +} + +func (m *ExitProgress) GetSuccessful() bool { + if m != nil { + return m.Successful + } + return false +} + +func (m *ExitProgress) GetCompletionReceipt() []byte { + if m != nil { + return m.CompletionReceipt + } + return nil +} + +type TransferSucceeded struct { + OriginalOrderLimit *OrderLimit `protobuf:"bytes,1,opt,name=original_order_limit,json=originalOrderLimit,proto3" json:"original_order_limit,omitempty"` + OriginalPieceHash *PieceHash `protobuf:"bytes,2,opt,name=original_piece_hash,json=originalPieceHash,proto3" 
json:"original_piece_hash,omitempty"` + ReplacementPieceHash *PieceHash `protobuf:"bytes,3,opt,name=replacement_piece_hash,json=replacementPieceHash,proto3" json:"replacement_piece_hash,omitempty"` + OriginalPieceId PieceID `protobuf:"bytes,4,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferSucceeded) Reset() { *m = TransferSucceeded{} } +func (m *TransferSucceeded) String() string { return proto.CompactTextString(m) } +func (*TransferSucceeded) ProtoMessage() {} +func (*TransferSucceeded) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{7} +} +func (m *TransferSucceeded) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransferSucceeded.Unmarshal(m, b) +} +func (m *TransferSucceeded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransferSucceeded.Marshal(b, m, deterministic) +} +func (m *TransferSucceeded) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferSucceeded.Merge(m, src) +} +func (m *TransferSucceeded) XXX_Size() int { + return xxx_messageInfo_TransferSucceeded.Size(m) +} +func (m *TransferSucceeded) XXX_DiscardUnknown() { + xxx_messageInfo_TransferSucceeded.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferSucceeded proto.InternalMessageInfo + +func (m *TransferSucceeded) GetOriginalOrderLimit() *OrderLimit { + if m != nil { + return m.OriginalOrderLimit + } + return nil +} + +func (m *TransferSucceeded) GetOriginalPieceHash() *PieceHash { + if m != nil { + return m.OriginalPieceHash + } + return nil +} + +func (m *TransferSucceeded) GetReplacementPieceHash() *PieceHash { + if m != nil { + return m.ReplacementPieceHash + } + return nil +} + +type TransferFailed struct { + OriginalPieceId PieceID 
`protobuf:"bytes,1,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"` + Error TransferFailed_Error `protobuf:"varint,2,opt,name=error,proto3,enum=gracefulexit.TransferFailed_Error" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferFailed) Reset() { *m = TransferFailed{} } +func (m *TransferFailed) String() string { return proto.CompactTextString(m) } +func (*TransferFailed) ProtoMessage() {} +func (*TransferFailed) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{8} +} +func (m *TransferFailed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransferFailed.Unmarshal(m, b) +} +func (m *TransferFailed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransferFailed.Marshal(b, m, deterministic) +} +func (m *TransferFailed) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferFailed.Merge(m, src) +} +func (m *TransferFailed) XXX_Size() int { + return xxx_messageInfo_TransferFailed.Size(m) +} +func (m *TransferFailed) XXX_DiscardUnknown() { + xxx_messageInfo_TransferFailed.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferFailed proto.InternalMessageInfo + +func (m *TransferFailed) GetError() TransferFailed_Error { + if m != nil { + return m.Error + } + return TransferFailed_NOT_FOUND +} + +type StorageNodeMessage struct { + // Types that are valid to be assigned to Message: + // *StorageNodeMessage_Succeeded + // *StorageNodeMessage_Failed + Message isStorageNodeMessage_Message `protobuf_oneof:"Message"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StorageNodeMessage) Reset() { *m = StorageNodeMessage{} } +func (m *StorageNodeMessage) String() string { return proto.CompactTextString(m) } +func (*StorageNodeMessage) ProtoMessage() {} +func 
(*StorageNodeMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{9} +} +func (m *StorageNodeMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StorageNodeMessage.Unmarshal(m, b) +} +func (m *StorageNodeMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StorageNodeMessage.Marshal(b, m, deterministic) +} +func (m *StorageNodeMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageNodeMessage.Merge(m, src) +} +func (m *StorageNodeMessage) XXX_Size() int { + return xxx_messageInfo_StorageNodeMessage.Size(m) +} +func (m *StorageNodeMessage) XXX_DiscardUnknown() { + xxx_messageInfo_StorageNodeMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageNodeMessage proto.InternalMessageInfo + +type isStorageNodeMessage_Message interface { + isStorageNodeMessage_Message() +} + +type StorageNodeMessage_Succeeded struct { + Succeeded *TransferSucceeded `protobuf:"bytes,1,opt,name=succeeded,proto3,oneof"` +} +type StorageNodeMessage_Failed struct { + Failed *TransferFailed `protobuf:"bytes,2,opt,name=failed,proto3,oneof"` +} + +func (*StorageNodeMessage_Succeeded) isStorageNodeMessage_Message() {} +func (*StorageNodeMessage_Failed) isStorageNodeMessage_Message() {} + +func (m *StorageNodeMessage) GetMessage() isStorageNodeMessage_Message { + if m != nil { + return m.Message + } + return nil +} + +func (m *StorageNodeMessage) GetSucceeded() *TransferSucceeded { + if x, ok := m.GetMessage().(*StorageNodeMessage_Succeeded); ok { + return x.Succeeded + } + return nil +} + +func (m *StorageNodeMessage) GetFailed() *TransferFailed { + if x, ok := m.GetMessage().(*StorageNodeMessage_Failed); ok { + return x.Failed + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*StorageNodeMessage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StorageNodeMessage_OneofMarshaler, _StorageNodeMessage_OneofUnmarshaler, _StorageNodeMessage_OneofSizer, []interface{}{ + (*StorageNodeMessage_Succeeded)(nil), + (*StorageNodeMessage_Failed)(nil), + } +} + +func _StorageNodeMessage_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StorageNodeMessage) + // Message + switch x := m.Message.(type) { + case *StorageNodeMessage_Succeeded: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Succeeded); err != nil { + return err + } + case *StorageNodeMessage_Failed: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Failed); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StorageNodeMessage.Message has unexpected type %T", x) + } + return nil +} + +func _StorageNodeMessage_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StorageNodeMessage) + switch tag { + case 1: // Message.succeeded + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransferSucceeded) + err := b.DecodeMessage(msg) + m.Message = &StorageNodeMessage_Succeeded{msg} + return true, err + case 2: // Message.failed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransferFailed) + err := b.DecodeMessage(msg) + m.Message = &StorageNodeMessage_Failed{msg} + return true, err + default: + return false, nil + } +} + +func _StorageNodeMessage_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StorageNodeMessage) + // Message + switch x := m.Message.(type) { + case *StorageNodeMessage_Succeeded: + s := proto.Size(x.Succeeded) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case 
*StorageNodeMessage_Failed: + s := proto.Size(x.Failed) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NotReady struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotReady) Reset() { *m = NotReady{} } +func (m *NotReady) String() string { return proto.CompactTextString(m) } +func (*NotReady) ProtoMessage() {} +func (*NotReady) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{10} +} +func (m *NotReady) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotReady.Unmarshal(m, b) +} +func (m *NotReady) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotReady.Marshal(b, m, deterministic) +} +func (m *NotReady) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotReady.Merge(m, src) +} +func (m *NotReady) XXX_Size() int { + return xxx_messageInfo_NotReady.Size(m) +} +func (m *NotReady) XXX_DiscardUnknown() { + xxx_messageInfo_NotReady.DiscardUnknown(m) +} + +var xxx_messageInfo_NotReady proto.InternalMessageInfo + +type TransferPiece struct { + OriginalPieceId PieceID `protobuf:"bytes,1,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"` + PrivateKey PiecePrivateKey `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + // addressed_order_limit contains the new piece id. 
+ AddressedOrderLimit *AddressedOrderLimit `protobuf:"bytes,3,opt,name=addressed_order_limit,json=addressedOrderLimit,proto3" json:"addressed_order_limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TransferPiece) Reset() { *m = TransferPiece{} } +func (m *TransferPiece) String() string { return proto.CompactTextString(m) } +func (*TransferPiece) ProtoMessage() {} +func (*TransferPiece) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{11} +} +func (m *TransferPiece) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TransferPiece.Unmarshal(m, b) +} +func (m *TransferPiece) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TransferPiece.Marshal(b, m, deterministic) +} +func (m *TransferPiece) XXX_Merge(src proto.Message) { + xxx_messageInfo_TransferPiece.Merge(m, src) +} +func (m *TransferPiece) XXX_Size() int { + return xxx_messageInfo_TransferPiece.Size(m) +} +func (m *TransferPiece) XXX_DiscardUnknown() { + xxx_messageInfo_TransferPiece.DiscardUnknown(m) +} + +var xxx_messageInfo_TransferPiece proto.InternalMessageInfo + +func (m *TransferPiece) GetAddressedOrderLimit() *AddressedOrderLimit { + if m != nil { + return m.AddressedOrderLimit + } + return nil +} + +type DeletePiece struct { + OriginalPieceId PieceID `protobuf:"bytes,1,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeletePiece) Reset() { *m = DeletePiece{} } +func (m *DeletePiece) String() string { return proto.CompactTextString(m) } +func (*DeletePiece) ProtoMessage() {} +func (*DeletePiece) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{12} +} +func (m *DeletePiece) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_DeletePiece.Unmarshal(m, b) +} +func (m *DeletePiece) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeletePiece.Marshal(b, m, deterministic) +} +func (m *DeletePiece) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeletePiece.Merge(m, src) +} +func (m *DeletePiece) XXX_Size() int { + return xxx_messageInfo_DeletePiece.Size(m) +} +func (m *DeletePiece) XXX_DiscardUnknown() { + xxx_messageInfo_DeletePiece.DiscardUnknown(m) +} + +var xxx_messageInfo_DeletePiece proto.InternalMessageInfo + +type ExitCompleted struct { + // when everything is completed + ExitCompleteSignature []byte `protobuf:"bytes,1,opt,name=exit_complete_signature,json=exitCompleteSignature,proto3" json:"exit_complete_signature,omitempty"` + // satellite who issued this exit completed + SatelliteId NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` + // storage node this exit completed was issued to + NodeId NodeID `protobuf:"bytes,3,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + // timestamp when the exit completed + Completed time.Time `protobuf:"bytes,4,opt,name=completed,proto3,stdtime" json:"completed"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExitCompleted) Reset() { *m = ExitCompleted{} } +func (m *ExitCompleted) String() string { return proto.CompactTextString(m) } +func (*ExitCompleted) ProtoMessage() {} +func (*ExitCompleted) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{13} +} +func (m *ExitCompleted) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExitCompleted.Unmarshal(m, b) +} +func (m *ExitCompleted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExitCompleted.Marshal(b, m, deterministic) +} +func (m *ExitCompleted) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ExitCompleted.Merge(m, src) +} +func (m *ExitCompleted) XXX_Size() int { + return xxx_messageInfo_ExitCompleted.Size(m) +} +func (m *ExitCompleted) XXX_DiscardUnknown() { + xxx_messageInfo_ExitCompleted.DiscardUnknown(m) +} + +var xxx_messageInfo_ExitCompleted proto.InternalMessageInfo + +func (m *ExitCompleted) GetExitCompleteSignature() []byte { + if m != nil { + return m.ExitCompleteSignature + } + return nil +} + +func (m *ExitCompleted) GetCompleted() time.Time { + if m != nil { + return m.Completed + } + return time.Time{} +} + +type ExitFailed struct { + // on failure + ExitFailureSignature []byte `protobuf:"bytes,1,opt,name=exit_failure_signature,json=exitFailureSignature,proto3" json:"exit_failure_signature,omitempty"` + Reason ExitFailed_Reason `protobuf:"varint,2,opt,name=reason,proto3,enum=gracefulexit.ExitFailed_Reason" json:"reason,omitempty"` + // satellite who issued this exit failed + SatelliteId NodeID `protobuf:"bytes,3,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` + // storage node this exit failed was issued to + NodeId NodeID `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + // timestamp when the exit failed + Failed time.Time `protobuf:"bytes,5,opt,name=failed,proto3,stdtime" json:"failed"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExitFailed) Reset() { *m = ExitFailed{} } +func (m *ExitFailed) String() string { return proto.CompactTextString(m) } +func (*ExitFailed) ProtoMessage() {} +func (*ExitFailed) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{14} +} +func (m *ExitFailed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExitFailed.Unmarshal(m, b) +} +func (m *ExitFailed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExitFailed.Marshal(b, m, deterministic) +} +func (m 
*ExitFailed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExitFailed.Merge(m, src) +} +func (m *ExitFailed) XXX_Size() int { + return xxx_messageInfo_ExitFailed.Size(m) +} +func (m *ExitFailed) XXX_DiscardUnknown() { + xxx_messageInfo_ExitFailed.DiscardUnknown(m) +} + +var xxx_messageInfo_ExitFailed proto.InternalMessageInfo + +func (m *ExitFailed) GetExitFailureSignature() []byte { + if m != nil { + return m.ExitFailureSignature + } + return nil +} + +func (m *ExitFailed) GetReason() ExitFailed_Reason { + if m != nil { + return m.Reason + } + return ExitFailed_VERIFICATION_FAILED +} + +func (m *ExitFailed) GetFailed() time.Time { + if m != nil { + return m.Failed + } + return time.Time{} +} + +type SatelliteMessage struct { + // Types that are valid to be assigned to Message: + // *SatelliteMessage_NotReady + // *SatelliteMessage_TransferPiece + // *SatelliteMessage_DeletePiece + // *SatelliteMessage_ExitCompleted + // *SatelliteMessage_ExitFailed + Message isSatelliteMessage_Message `protobuf_oneof:"Message"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SatelliteMessage) Reset() { *m = SatelliteMessage{} } +func (m *SatelliteMessage) String() string { return proto.CompactTextString(m) } +func (*SatelliteMessage) ProtoMessage() {} +func (*SatelliteMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_8f0acbf2ce5fa631, []int{15} +} +func (m *SatelliteMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SatelliteMessage.Unmarshal(m, b) +} +func (m *SatelliteMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SatelliteMessage.Marshal(b, m, deterministic) +} +func (m *SatelliteMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_SatelliteMessage.Merge(m, src) +} +func (m *SatelliteMessage) XXX_Size() int { + return xxx_messageInfo_SatelliteMessage.Size(m) +} +func (m *SatelliteMessage) XXX_DiscardUnknown() { + 
xxx_messageInfo_SatelliteMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_SatelliteMessage proto.InternalMessageInfo + +type isSatelliteMessage_Message interface { + isSatelliteMessage_Message() +} + +type SatelliteMessage_NotReady struct { + NotReady *NotReady `protobuf:"bytes,1,opt,name=not_ready,json=notReady,proto3,oneof"` +} +type SatelliteMessage_TransferPiece struct { + TransferPiece *TransferPiece `protobuf:"bytes,2,opt,name=transfer_piece,json=transferPiece,proto3,oneof"` +} +type SatelliteMessage_DeletePiece struct { + DeletePiece *DeletePiece `protobuf:"bytes,3,opt,name=delete_piece,json=deletePiece,proto3,oneof"` +} +type SatelliteMessage_ExitCompleted struct { + ExitCompleted *ExitCompleted `protobuf:"bytes,4,opt,name=exit_completed,json=exitCompleted,proto3,oneof"` +} +type SatelliteMessage_ExitFailed struct { + ExitFailed *ExitFailed `protobuf:"bytes,5,opt,name=exit_failed,json=exitFailed,proto3,oneof"` +} + +func (*SatelliteMessage_NotReady) isSatelliteMessage_Message() {} +func (*SatelliteMessage_TransferPiece) isSatelliteMessage_Message() {} +func (*SatelliteMessage_DeletePiece) isSatelliteMessage_Message() {} +func (*SatelliteMessage_ExitCompleted) isSatelliteMessage_Message() {} +func (*SatelliteMessage_ExitFailed) isSatelliteMessage_Message() {} + +func (m *SatelliteMessage) GetMessage() isSatelliteMessage_Message { + if m != nil { + return m.Message + } + return nil +} + +func (m *SatelliteMessage) GetNotReady() *NotReady { + if x, ok := m.GetMessage().(*SatelliteMessage_NotReady); ok { + return x.NotReady + } + return nil +} + +func (m *SatelliteMessage) GetTransferPiece() *TransferPiece { + if x, ok := m.GetMessage().(*SatelliteMessage_TransferPiece); ok { + return x.TransferPiece + } + return nil +} + +func (m *SatelliteMessage) GetDeletePiece() *DeletePiece { + if x, ok := m.GetMessage().(*SatelliteMessage_DeletePiece); ok { + return x.DeletePiece + } + return nil +} + +func (m *SatelliteMessage) GetExitCompleted() *ExitCompleted { + if 
x, ok := m.GetMessage().(*SatelliteMessage_ExitCompleted); ok { + return x.ExitCompleted + } + return nil +} + +func (m *SatelliteMessage) GetExitFailed() *ExitFailed { + if x, ok := m.GetMessage().(*SatelliteMessage_ExitFailed); ok { + return x.ExitFailed + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SatelliteMessage) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SatelliteMessage_OneofMarshaler, _SatelliteMessage_OneofUnmarshaler, _SatelliteMessage_OneofSizer, []interface{}{ + (*SatelliteMessage_NotReady)(nil), + (*SatelliteMessage_TransferPiece)(nil), + (*SatelliteMessage_DeletePiece)(nil), + (*SatelliteMessage_ExitCompleted)(nil), + (*SatelliteMessage_ExitFailed)(nil), + } +} + +func _SatelliteMessage_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SatelliteMessage) + // Message + switch x := m.Message.(type) { + case *SatelliteMessage_NotReady: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NotReady); err != nil { + return err + } + case *SatelliteMessage_TransferPiece: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TransferPiece); err != nil { + return err + } + case *SatelliteMessage_DeletePiece: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DeletePiece); err != nil { + return err + } + case *SatelliteMessage_ExitCompleted: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExitCompleted); err != nil { + return err + } + case *SatelliteMessage_ExitFailed: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ExitFailed); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SatelliteMessage.Message has unexpected type %T", x) + } + return nil +} + +func _SatelliteMessage_OneofUnmarshaler(msg 
proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SatelliteMessage) + switch tag { + case 1: // Message.not_ready + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NotReady) + err := b.DecodeMessage(msg) + m.Message = &SatelliteMessage_NotReady{msg} + return true, err + case 2: // Message.transfer_piece + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TransferPiece) + err := b.DecodeMessage(msg) + m.Message = &SatelliteMessage_TransferPiece{msg} + return true, err + case 3: // Message.delete_piece + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DeletePiece) + err := b.DecodeMessage(msg) + m.Message = &SatelliteMessage_DeletePiece{msg} + return true, err + case 4: // Message.exit_completed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExitCompleted) + err := b.DecodeMessage(msg) + m.Message = &SatelliteMessage_ExitCompleted{msg} + return true, err + case 5: // Message.exit_failed + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ExitFailed) + err := b.DecodeMessage(msg) + m.Message = &SatelliteMessage_ExitFailed{msg} + return true, err + default: + return false, nil + } +} + +func _SatelliteMessage_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SatelliteMessage) + // Message + switch x := m.Message.(type) { + case *SatelliteMessage_NotReady: + s := proto.Size(x.NotReady) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *SatelliteMessage_TransferPiece: + s := proto.Size(x.TransferPiece) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *SatelliteMessage_DeletePiece: + s := proto.Size(x.DeletePiece) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *SatelliteMessage_ExitCompleted: + s := proto.Size(x.ExitCompleted) + n += 1 // tag and wire + n += 
proto.SizeVarint(uint64(s)) + n += s + case *SatelliteMessage_ExitFailed: + s := proto.Size(x.ExitFailed) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterEnum("gracefulexit.TransferFailed_Error", TransferFailed_Error_name, TransferFailed_Error_value) + proto.RegisterEnum("gracefulexit.ExitFailed_Reason", ExitFailed_Reason_name, ExitFailed_Reason_value) + proto.RegisterType((*GetNonExitingSatellitesRequest)(nil), "gracefulexit.GetNonExitingSatellitesRequest") + proto.RegisterType((*InitiateGracefulExitRequest)(nil), "gracefulexit.InitiateGracefulExitRequest") + proto.RegisterType((*NonExitingSatellite)(nil), "gracefulexit.NonExitingSatellite") + proto.RegisterType((*GetNonExitingSatellitesResponse)(nil), "gracefulexit.GetNonExitingSatellitesResponse") + proto.RegisterType((*GetExitProgressRequest)(nil), "gracefulexit.GetExitProgressRequest") + proto.RegisterType((*GetExitProgressResponse)(nil), "gracefulexit.GetExitProgressResponse") + proto.RegisterType((*ExitProgress)(nil), "gracefulexit.ExitProgress") + proto.RegisterType((*TransferSucceeded)(nil), "gracefulexit.TransferSucceeded") + proto.RegisterType((*TransferFailed)(nil), "gracefulexit.TransferFailed") + proto.RegisterType((*StorageNodeMessage)(nil), "gracefulexit.StorageNodeMessage") + proto.RegisterType((*NotReady)(nil), "gracefulexit.NotReady") + proto.RegisterType((*TransferPiece)(nil), "gracefulexit.TransferPiece") + proto.RegisterType((*DeletePiece)(nil), "gracefulexit.DeletePiece") + proto.RegisterType((*ExitCompleted)(nil), "gracefulexit.ExitCompleted") + proto.RegisterType((*ExitFailed)(nil), "gracefulexit.ExitFailed") + proto.RegisterType((*SatelliteMessage)(nil), "gracefulexit.SatelliteMessage") +} + +func init() { proto.RegisterFile("gracefulexit.proto", fileDescriptor_8f0acbf2ce5fa631) } + +var fileDescriptor_8f0acbf2ce5fa631 = []byte{ + // 
1260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x5d, 0x73, 0xdb, 0x54, + 0x13, 0xb6, 0x9c, 0xe6, 0xc3, 0x6b, 0x27, 0x71, 0x4e, 0xbe, 0xfc, 0x3a, 0x6d, 0xed, 0x57, 0xc0, + 0x34, 0x9d, 0xa1, 0x0e, 0x04, 0x28, 0xcc, 0x94, 0x81, 0x91, 0x63, 0x25, 0x16, 0x4d, 0xe5, 0xf4, + 0xc4, 0x09, 0x1d, 0x66, 0x40, 0xa3, 0x5a, 0x1b, 0x47, 0xd4, 0xd6, 0x11, 0x92, 0xdc, 0x69, 0x6f, + 0xb8, 0xe0, 0x17, 0x70, 0xc9, 0x35, 0xbf, 0x06, 0x6e, 0xb9, 0x83, 0x19, 0xca, 0x70, 0xc5, 0xdf, + 0x60, 0x8e, 0x74, 0xa4, 0x48, 0xb1, 0x63, 0xd2, 0xe9, 0x95, 0xad, 0xdd, 0x67, 0xf7, 0xec, 0xee, + 0xd9, 0x67, 0xcf, 0x02, 0xe9, 0x7b, 0x66, 0x0f, 0xcf, 0x46, 0x03, 0x7c, 0x61, 0x07, 0x0d, 0xd7, + 0x63, 0x01, 0x23, 0xa5, 0xb4, 0xac, 0x0a, 0x7d, 0xd6, 0x67, 0x91, 0xa6, 0x5a, 0xeb, 0x33, 0xd6, + 0x1f, 0xe0, 0x4e, 0xf8, 0xf5, 0x74, 0x74, 0xb6, 0x13, 0xd8, 0x43, 0xf4, 0x03, 0x73, 0xe8, 0x0a, + 0xc0, 0xd2, 0x10, 0x03, 0xd3, 0x76, 0xce, 0x62, 0x83, 0x12, 0xf3, 0x2c, 0xf4, 0xfc, 0xe8, 0x4b, + 0xae, 0xc3, 0xed, 0x03, 0x0c, 0x74, 0xe6, 0xa8, 0x2f, 0xec, 0xc0, 0x76, 0xfa, 0xc7, 0x66, 0x80, + 0x83, 0x81, 0x1d, 0xa0, 0x4f, 0xf1, 0xbb, 0x11, 0xfa, 0x81, 0xbc, 0x0f, 0x5b, 0x9a, 0x63, 0x07, + 0xb6, 0x19, 0xe0, 0x81, 0x08, 0x82, 0x63, 0x85, 0x9a, 0xdc, 0x81, 0x79, 0x87, 0x59, 0x68, 0xd8, + 0x56, 0x45, 0xaa, 0x4b, 0xdb, 0xa5, 0xe6, 0xd2, 0x2f, 0xaf, 0x6a, 0xb9, 0x3f, 0x5e, 0xd5, 0xe6, + 0x74, 0x66, 0xa1, 0xd6, 0xa2, 0x73, 0x5c, 0xad, 0x59, 0xf2, 0xf7, 0xb0, 0x3a, 0xe1, 0x98, 0x6b, + 0xdb, 0x93, 0x1a, 0x14, 0x2d, 0x36, 0x34, 0x6d, 0xc7, 0x70, 0xcc, 0x21, 0x56, 0xf2, 0x75, 0x69, + 0xbb, 0x40, 0x21, 0x12, 0xe9, 0xe6, 0x10, 0xc9, 0x2d, 0x00, 0xdf, 0x35, 0x7b, 0x68, 0x8c, 0x7c, + 0xb4, 0x2a, 0x33, 0x75, 0x69, 0x5b, 0xa2, 0x85, 0x50, 0x72, 0xe2, 0xa3, 0x25, 0x5b, 0x50, 0xbb, + 0x32, 0x53, 0xdf, 0x65, 0x8e, 0x8f, 0x44, 0x01, 0xf0, 0x13, 0x69, 0x45, 0xaa, 0xcf, 0x6c, 0x17, + 0x77, 0xff, 0xdf, 0xc8, 0x5c, 0xc7, 0x04, 0x7b, 0x9a, 0x32, 0x92, 0x2b, 0xb0, 0x71, 0x80, 
0x01, + 0x87, 0x1c, 0x79, 0xac, 0xef, 0xa1, 0x9f, 0xd4, 0xf1, 0x31, 0x6c, 0x8e, 0x69, 0xc4, 0xb9, 0xf7, + 0x61, 0xc1, 0x15, 0x32, 0x71, 0x6a, 0x35, 0x7b, 0x6a, 0xc6, 0x2a, 0xc1, 0xca, 0xbf, 0x49, 0x50, + 0x4a, 0xab, 0x2e, 0xd7, 0x48, 0x1a, 0xab, 0x51, 0xaa, 0xda, 0xf9, 0xa9, 0xd5, 0xbe, 0x0b, 0x65, + 0x17, 0xbd, 0x1e, 0x3a, 0x81, 0xd1, 0x63, 0x43, 0x77, 0x80, 0x01, 0x86, 0x25, 0xcd, 0xd3, 0x65, + 0x21, 0xdf, 0x13, 0x62, 0x72, 0x1b, 0xc0, 0x1f, 0xf5, 0x7a, 0xe8, 0xfb, 0x67, 0xa3, 0x41, 0xe5, + 0x46, 0x5d, 0xda, 0x5e, 0xa0, 0x29, 0x09, 0xb9, 0x07, 0x44, 0xb8, 0xb0, 0x99, 0x63, 0x78, 0xd8, + 0x43, 0xdb, 0x0d, 0x2a, 0xb3, 0xfc, 0x78, 0xba, 0x72, 0xa1, 0xa1, 0x91, 0x42, 0xfe, 0x39, 0x0f, + 0x2b, 0x5d, 0xcf, 0x74, 0xfc, 0x33, 0xf4, 0x8e, 0xb9, 0x17, 0xb4, 0xd0, 0x22, 0x2d, 0x58, 0x63, + 0x9e, 0xdd, 0xb7, 0x1d, 0x73, 0x60, 0x84, 0x0d, 0x6c, 0x0c, 0xec, 0xa1, 0x1d, 0x84, 0x29, 0x16, + 0x77, 0x49, 0x43, 0x34, 0x75, 0x87, 0xff, 0x1c, 0x72, 0x0d, 0x25, 0x31, 0xfe, 0x42, 0x46, 0x14, + 0x58, 0x4d, 0xbc, 0xb8, 0x36, 0xf6, 0xd0, 0x38, 0x37, 0xfd, 0xf3, 0xb0, 0x14, 0xc5, 0xdd, 0x95, + 0xd8, 0xc9, 0x11, 0xd7, 0xb4, 0x4d, 0xff, 0x9c, 0xae, 0xc4, 0xe8, 0x44, 0x44, 0x0e, 0x60, 0xc3, + 0x43, 0x77, 0x60, 0xf6, 0x70, 0xc8, 0x8b, 0x93, 0xf2, 0x32, 0x73, 0x95, 0x97, 0xb5, 0x94, 0xc1, + 0x85, 0xa3, 0x07, 0xb0, 0x72, 0x29, 0x16, 0xdb, 0x0a, 0xab, 0x57, 0x6a, 0x2e, 0x8b, 0x4b, 0x99, + 0x0f, 0xd1, 0x5a, 0x8b, 0x2e, 0x67, 0xe2, 0xd0, 0x2c, 0xf9, 0x6f, 0x09, 0x96, 0xe2, 0x22, 0xed, + 0x9b, 0xf6, 0x00, 0xad, 0xc9, 0xfe, 0xa4, 0xeb, 0xf9, 0x23, 0x9f, 0xc0, 0x2c, 0x7a, 0x1e, 0xf3, + 0xc2, 0x52, 0x2c, 0xed, 0xca, 0xd9, 0xf6, 0xcb, 0x9e, 0xd4, 0x50, 0x39, 0x92, 0x46, 0x06, 0xf2, + 0x13, 0x98, 0x0d, 0xbf, 0xc9, 0x22, 0x14, 0xf4, 0x4e, 0xd7, 0xd8, 0xef, 0x9c, 0xe8, 0xad, 0x72, + 0x8e, 0xdc, 0x84, 0xca, 0x71, 0xb7, 0x43, 0x95, 0x03, 0xd5, 0xd0, 0x3b, 0x2d, 0xd5, 0x38, 0xd1, + 0x95, 0x53, 0x45, 0x3b, 0x54, 0x9a, 0x87, 0x6a, 0x59, 0x22, 0xeb, 0xb0, 0xd2, 0x56, 0x8e, 0xdb, + 0xc6, 0xa9, 0x4a, 0xb5, 0x7d, 
0x6d, 0x4f, 0xe9, 0x6a, 0x1d, 0xbd, 0x9c, 0x27, 0x45, 0x98, 0x3f, + 0xd1, 0x1f, 0xea, 0x9d, 0x2f, 0xf5, 0x32, 0xc8, 0x3f, 0x49, 0x40, 0x8e, 0x03, 0xe6, 0x99, 0x7d, + 0xe4, 0xcd, 0xf9, 0x08, 0x7d, 0xdf, 0xec, 0x23, 0xf9, 0x1c, 0x0a, 0x7e, 0xdc, 0x16, 0xe2, 0xfa, + 0x6b, 0x93, 0xc3, 0x4d, 0xba, 0xa7, 0x9d, 0xa3, 0x17, 0x36, 0xe4, 0x3e, 0xcc, 0x9d, 0x85, 0x89, + 0x88, 0x7b, 0xbf, 0x39, 0x2d, 0xd9, 0x76, 0x8e, 0x0a, 0x74, 0xb3, 0x00, 0xf3, 0x22, 0x06, 0x19, + 0x60, 0x41, 0x67, 0x01, 0x45, 0xd3, 0x7a, 0x29, 0xff, 0x2e, 0xc1, 0x62, 0x6c, 0x13, 0x96, 0xf3, + 0x4d, 0x6f, 0xa2, 0xe8, 0x7a, 0xf6, 0x73, 0x33, 0x40, 0xe3, 0x19, 0xbe, 0x14, 0x2c, 0xdd, 0x14, + 0x66, 0xcb, 0x21, 0xea, 0x28, 0xd2, 0x3f, 0xc4, 0x97, 0x14, 0xdc, 0xe4, 0x3f, 0x79, 0x0c, 0xeb, + 0xa6, 0x65, 0xf1, 0x39, 0x80, 0x56, 0x86, 0x23, 0x51, 0x63, 0xde, 0x6a, 0x24, 0x0f, 0x81, 0x12, + 0xc3, 0x52, 0x74, 0x59, 0x35, 0xc7, 0x85, 0xf2, 0x17, 0x50, 0x6c, 0x21, 0x27, 0xf9, 0x9b, 0x27, + 0x26, 0xff, 0x23, 0xc1, 0x22, 0x1f, 0x56, 0xf1, 0xdc, 0xe0, 0x17, 0xb1, 0xc9, 0x2b, 0x9e, 0x0c, + 0x18, 0xc3, 0xb7, 0xfb, 0x8e, 0x19, 0x8c, 0xbc, 0x68, 0x72, 0x95, 0xe8, 0x3a, 0xa6, 0xf0, 0xc7, + 0xb1, 0x92, 0xbc, 0x0f, 0xa5, 0x64, 0xe2, 0x5e, 0x3d, 0xc9, 0x8a, 0x09, 0x46, 0xb3, 0xd2, 0x73, + 0x6f, 0x66, 0xea, 0xdc, 0x6b, 0x42, 0x21, 0x0e, 0x27, 0x62, 0x63, 0x38, 0x8b, 0xc3, 0x27, 0xb6, + 0x11, 0x3f, 0xb1, 0x8d, 0x6e, 0xfc, 0xc4, 0x36, 0x17, 0xb8, 0x9b, 0x1f, 0xff, 0xaa, 0x49, 0xf4, + 0xc2, 0x4c, 0xfe, 0x61, 0x06, 0x80, 0x67, 0x2a, 0x88, 0xf9, 0x21, 0x6c, 0x84, 0x69, 0xf2, 0x36, + 0x1a, 0x79, 0xe3, 0x59, 0xae, 0xa1, 0xc0, 0x8e, 0xbc, 0x54, 0x92, 0x1f, 0xc3, 0x9c, 0x87, 0xa6, + 0xcf, 0x1c, 0x41, 0xc9, 0xda, 0xf8, 0x8b, 0x20, 0xe8, 0x48, 0x43, 0x18, 0x15, 0xf0, 0xb1, 0xea, + 0xcc, 0xbc, 0x56, 0x75, 0x6e, 0x4c, 0xad, 0xce, 0xa7, 0x09, 0x75, 0x66, 0x5f, 0xa3, 0x34, 0xc2, + 0x46, 0x7e, 0x06, 0x73, 0x51, 0xac, 0x64, 0x13, 0x56, 0xd3, 0xcc, 0x37, 0xf6, 0x15, 0xed, 0x50, + 0xe5, 0x53, 0xa3, 0x06, 0x5b, 0x9a, 0xae, 0xec, 0x75, 0xb5, 0x53, 
0xd5, 0xe8, 0x6a, 0x8f, 0xd4, + 0x7d, 0xaa, 0x3c, 0x52, 0x0d, 0xf5, 0xc9, 0x9e, 0xaa, 0xb6, 0xd4, 0x56, 0x59, 0x22, 0x77, 0xe0, + 0xad, 0xce, 0xa9, 0x4a, 0x95, 0xc3, 0xc3, 0xd0, 0xe8, 0x84, 0xaa, 0xc6, 0x91, 0x4a, 0xf7, 0x54, + 0xbd, 0xcb, 0x27, 0x4d, 0x02, 0xcc, 0xcb, 0x7f, 0xe6, 0xa1, 0x9c, 0x3c, 0xd1, 0xf1, 0xec, 0xf8, + 0x08, 0x0a, 0x0e, 0x0b, 0x0c, 0x8f, 0x13, 0x57, 0xcc, 0x8e, 0x8d, 0xcb, 0xef, 0x7b, 0x44, 0xeb, + 0x76, 0x8e, 0x2e, 0x38, 0xe2, 0x3f, 0x69, 0xc1, 0x52, 0x20, 0x18, 0x1e, 0xf5, 0xbd, 0x98, 0x1c, + 0x5b, 0x93, 0x27, 0x47, 0x34, 0xe3, 0x73, 0x74, 0x31, 0xc8, 0x8c, 0x85, 0xcf, 0xa0, 0x64, 0x85, + 0x64, 0x12, 0x3e, 0x22, 0x5a, 0xfe, 0x2f, 0xeb, 0x23, 0x45, 0xb7, 0x76, 0x8e, 0x16, 0xad, 0x14, + 0xfb, 0x5a, 0xb0, 0x94, 0xa1, 0x4b, 0xdc, 0x9f, 0x5b, 0xe3, 0x9d, 0x91, 0x70, 0x8c, 0x47, 0x81, + 0x19, 0xd2, 0x3d, 0x80, 0x62, 0xd2, 0x8d, 0xc9, 0x3d, 0x56, 0xae, 0x6a, 0xae, 0x76, 0x8e, 0x02, + 0x26, 0x5f, 0xa9, 0x11, 0xb8, 0xfb, 0x6b, 0x1e, 0xca, 0xbc, 0x3b, 0xd2, 0x3b, 0x21, 0x79, 0x1e, + 0xee, 0x38, 0x93, 0x76, 0x2c, 0xf2, 0x6e, 0xf6, 0x88, 0xe9, 0x4b, 0x67, 0xf5, 0xde, 0x35, 0xd1, + 0x62, 0x81, 0xfa, 0x1a, 0xd6, 0x26, 0xed, 0xa8, 0xe4, 0x6e, 0xd6, 0xcd, 0x94, 0x3d, 0xb6, 0x3a, + 0x65, 0xe3, 0x22, 0xdf, 0xc0, 0xf2, 0xa5, 0xd5, 0x8d, 0xbc, 0x3d, 0x16, 0xe0, 0x84, 0x9d, 0xaf, + 0xfa, 0xce, 0x7f, 0xa0, 0xa2, 0xf0, 0x77, 0xcf, 0x61, 0x3d, 0x49, 0x2a, 0x13, 0x7f, 0x07, 0xe6, + 0x8f, 0x3c, 0xc6, 0x17, 0x29, 0x52, 0xcf, 0xba, 0x1a, 0x7f, 0x18, 0xab, 0xb7, 0x2f, 0x21, 0x2e, + 0x35, 0xff, 0xb6, 0xf4, 0x9e, 0xd4, 0x5c, 0xfb, 0x8a, 0xf8, 0x01, 0xf3, 0xbe, 0x6d, 0xd8, 0x6c, + 0xa7, 0xc7, 0x86, 0x43, 0xe6, 0xec, 0xb8, 0x4f, 0x9f, 0xce, 0x85, 0xf4, 0xfd, 0xe0, 0xdf, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xa7, 0x23, 0xa6, 0xb1, 0x7a, 0x0c, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCNodeGracefulExitClient interface { + DRPCConn() drpc.Conn + + // GetSatellitesList returns a list of satellites that the storagenode has not exited. 
+ GetNonExitingSatellites(ctx context.Context, in *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error) + // InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting. + InitiateGracefulExit(ctx context.Context, in *InitiateGracefulExitRequest) (*ExitProgress, error) + // GetExitProgress returns graceful exit status on each satellite for a given storagenode. + GetExitProgress(ctx context.Context, in *GetExitProgressRequest) (*GetExitProgressResponse, error) +} + +type drpcNodeGracefulExitClient struct { + cc drpc.Conn +} + +func NewDRPCNodeGracefulExitClient(cc drpc.Conn) DRPCNodeGracefulExitClient { + return &drpcNodeGracefulExitClient{cc} +} + +func (c *drpcNodeGracefulExitClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcNodeGracefulExitClient) GetNonExitingSatellites(ctx context.Context, in *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error) { + out := new(GetNonExitingSatellitesResponse) + err := c.cc.Invoke(ctx, "/gracefulexit.NodeGracefulExit/GetNonExitingSatellites", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcNodeGracefulExitClient) InitiateGracefulExit(ctx context.Context, in *InitiateGracefulExitRequest) (*ExitProgress, error) { + out := new(ExitProgress) + err := c.cc.Invoke(ctx, "/gracefulexit.NodeGracefulExit/InitiateGracefulExit", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcNodeGracefulExitClient) GetExitProgress(ctx context.Context, in *GetExitProgressRequest) (*GetExitProgressResponse, error) { + out := new(GetExitProgressResponse) + err := c.cc.Invoke(ctx, "/gracefulexit.NodeGracefulExit/GetExitProgress", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCNodeGracefulExitServer interface { + // GetSatellitesList returns a list of satellites that the storagenode has not exited. 
+ GetNonExitingSatellites(context.Context, *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error) + // InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting. + InitiateGracefulExit(context.Context, *InitiateGracefulExitRequest) (*ExitProgress, error) + // GetExitProgress returns graceful exit status on each satellite for a given storagenode. + GetExitProgress(context.Context, *GetExitProgressRequest) (*GetExitProgressResponse, error) +} + +type DRPCNodeGracefulExitDescription struct{} + +func (DRPCNodeGracefulExitDescription) NumMethods() int { return 3 } + +func (DRPCNodeGracefulExitDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/gracefulexit.NodeGracefulExit/GetNonExitingSatellites", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeGracefulExitServer). + GetNonExitingSatellites( + ctx, + in1.(*GetNonExitingSatellitesRequest), + ) + }, DRPCNodeGracefulExitServer.GetNonExitingSatellites, true + case 1: + return "/gracefulexit.NodeGracefulExit/InitiateGracefulExit", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeGracefulExitServer). + InitiateGracefulExit( + ctx, + in1.(*InitiateGracefulExitRequest), + ) + }, DRPCNodeGracefulExitServer.InitiateGracefulExit, true + case 2: + return "/gracefulexit.NodeGracefulExit/GetExitProgress", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeGracefulExitServer). 
+ GetExitProgress( + ctx, + in1.(*GetExitProgressRequest), + ) + }, DRPCNodeGracefulExitServer.GetExitProgress, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterNodeGracefulExit(mux drpc.Mux, impl DRPCNodeGracefulExitServer) error { + return mux.Register(impl, DRPCNodeGracefulExitDescription{}) +} + +type DRPCNodeGracefulExit_GetNonExitingSatellitesStream interface { + drpc.Stream + SendAndClose(*GetNonExitingSatellitesResponse) error +} + +type drpcNodeGracefulExitGetNonExitingSatellitesStream struct { + drpc.Stream +} + +func (x *drpcNodeGracefulExitGetNonExitingSatellitesStream) SendAndClose(m *GetNonExitingSatellitesResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNodeGracefulExit_InitiateGracefulExitStream interface { + drpc.Stream + SendAndClose(*ExitProgress) error +} + +type drpcNodeGracefulExitInitiateGracefulExitStream struct { + drpc.Stream +} + +func (x *drpcNodeGracefulExitInitiateGracefulExitStream) SendAndClose(m *ExitProgress) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNodeGracefulExit_GetExitProgressStream interface { + drpc.Stream + SendAndClose(*GetExitProgressResponse) error +} + +type drpcNodeGracefulExitGetExitProgressStream struct { + drpc.Stream +} + +func (x *drpcNodeGracefulExitGetExitProgressStream) SendAndClose(m *GetExitProgressResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCSatelliteGracefulExitClient interface { + DRPCConn() drpc.Conn + + // Process is called by storage nodes to initiate the graceful exit, get pieces to transfer, and receive exit status. 
+ Process(ctx context.Context) (DRPCSatelliteGracefulExit_ProcessClient, error) +} + +type drpcSatelliteGracefulExitClient struct { + cc drpc.Conn +} + +func NewDRPCSatelliteGracefulExitClient(cc drpc.Conn) DRPCSatelliteGracefulExitClient { + return &drpcSatelliteGracefulExitClient{cc} +} + +func (c *drpcSatelliteGracefulExitClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcSatelliteGracefulExitClient) Process(ctx context.Context) (DRPCSatelliteGracefulExit_ProcessClient, error) { + stream, err := c.cc.NewStream(ctx, "/gracefulexit.SatelliteGracefulExit/Process") + if err != nil { + return nil, err + } + x := &drpcSatelliteGracefulExitProcessClient{stream} + return x, nil +} + +type DRPCSatelliteGracefulExit_ProcessClient interface { + drpc.Stream + Send(*StorageNodeMessage) error + Recv() (*SatelliteMessage, error) +} + +type drpcSatelliteGracefulExitProcessClient struct { + drpc.Stream +} + +func (x *drpcSatelliteGracefulExitProcessClient) Send(m *StorageNodeMessage) error { + return x.MsgSend(m) +} + +func (x *drpcSatelliteGracefulExitProcessClient) Recv() (*SatelliteMessage, error) { + m := new(SatelliteMessage) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +type DRPCSatelliteGracefulExitServer interface { + // Process is called by storage nodes to initiate the graceful exit, get pieces to transfer, and receive exit status. + Process(DRPCSatelliteGracefulExit_ProcessStream) error +} + +type DRPCSatelliteGracefulExitDescription struct{} + +func (DRPCSatelliteGracefulExitDescription) NumMethods() int { return 1 } + +func (DRPCSatelliteGracefulExitDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/gracefulexit.SatelliteGracefulExit/Process", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return nil, srv.(DRPCSatelliteGracefulExitServer). 
+ Process( + &drpcSatelliteGracefulExitProcessStream{in1.(drpc.Stream)}, + ) + }, DRPCSatelliteGracefulExitServer.Process, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterSatelliteGracefulExit(mux drpc.Mux, impl DRPCSatelliteGracefulExitServer) error { + return mux.Register(impl, DRPCSatelliteGracefulExitDescription{}) +} + +type DRPCSatelliteGracefulExit_ProcessStream interface { + drpc.Stream + Send(*SatelliteMessage) error + Recv() (*StorageNodeMessage, error) +} + +type drpcSatelliteGracefulExitProcessStream struct { + drpc.Stream +} + +func (x *drpcSatelliteGracefulExitProcessStream) Send(m *SatelliteMessage) error { + return x.MsgSend(m) +} + +func (x *drpcSatelliteGracefulExitProcessStream) Recv() (*StorageNodeMessage, error) { + m := new(StorageNodeMessage) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/gracefulexit.proto b/vendor/storj.io/common/pb/gracefulexit.proto new file mode 100644 index 000000000..218a85567 --- /dev/null +++ b/vendor/storj.io/common/pb/gracefulexit.proto @@ -0,0 +1,137 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "metainfo.proto"; +import "orders.proto"; + +package gracefulexit; + +// NodeGracefulExit is a private service on storagenodes +service NodeGracefulExit { + // GetSatellitesList returns a list of satellites that the storagenode has not exited. + rpc GetNonExitingSatellites(GetNonExitingSatellitesRequest) returns (GetNonExitingSatellitesResponse); + // InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting. + rpc InitiateGracefulExit(InitiateGracefulExitRequest) returns (ExitProgress); + // GetExitProgress returns graceful exit status on each satellite for a given storagenode. 
+ rpc GetExitProgress(GetExitProgressRequest) returns (GetExitProgressResponse); +} + +message GetNonExitingSatellitesRequest{} + +message InitiateGracefulExitRequest { + bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; +} + +// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit +message NonExitingSatellite { + bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + string domain_name = 2; + double space_used = 3; +} + +message GetNonExitingSatellitesResponse { + repeated NonExitingSatellite satellites = 1; +} + +message GetExitProgressRequest {} + +message GetExitProgressResponse { + repeated ExitProgress progress = 1; +} + +message ExitProgress { + string domain_name = 1; + bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + float percent_complete = 3; + bool successful = 4; + bytes completion_receipt = 5; +} + +service SatelliteGracefulExit { + // Process is called by storage nodes to initiate the graceful exit, get pieces to transfer, and receive exit status. 
+ rpc Process(stream StorageNodeMessage) returns (stream SatelliteMessage); +} + +message TransferSucceeded { + orders.OrderLimit original_order_limit = 1; + orders.PieceHash original_piece_hash = 2; + orders.PieceHash replacement_piece_hash = 3; + bytes original_piece_id = 4 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; +} + +message TransferFailed { + bytes original_piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + enum Error { + NOT_FOUND = 0; + STORAGE_NODE_UNAVAILABLE = 1; + HASH_VERIFICATION = 2; + + UNKNOWN = 10; + } + Error error = 2; +} + +message StorageNodeMessage { + oneof Message { + TransferSucceeded succeeded = 1; + TransferFailed failed = 2; + } +} + +message NotReady {} + +message TransferPiece { + bytes original_piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + bytes private_key = 2 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; + + // addressed_order_limit contains the new piece id. 
+ metainfo.AddressedOrderLimit addressed_order_limit =3; +} + +message DeletePiece { + bytes original_piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; +} + +message ExitCompleted { + // when everything is completed + bytes exit_complete_signature = 1; + // satellite who issued this exit completed + bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // storage node this exit completed was issued to + bytes node_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // timestamp when the exit completed + google.protobuf.Timestamp completed = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message ExitFailed { + enum Reason { + VERIFICATION_FAILED = 0; + INACTIVE_TIMEFRAME_EXCEEDED = 1; + OVERALL_FAILURE_PERCENTAGE_EXCEEDED = 2; + } + // on failure + bytes exit_failure_signature = 1; + Reason reason = 2; + // satellite who issued this exit failed + bytes satellite_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // storage node this exit failed was issued to + bytes node_id = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // timestamp when the exit failed + google.protobuf.Timestamp failed = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message SatelliteMessage { + oneof Message { + NotReady not_ready = 1; + TransferPiece transfer_piece = 2; + DeletePiece delete_piece = 3; + ExitCompleted exit_completed = 4; + ExitFailed exit_failed = 5; + } +} + diff --git a/vendor/storj.io/common/pb/heldamount.pb.go b/vendor/storj.io/common/pb/heldamount.pb.go new file mode 100644 index 000000000..6512fff15 --- /dev/null +++ b/vendor/storj.io/common/pb/heldamount.pb.go @@ -0,0 +1,483 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: heldamount.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type GetHeldAmountRequest struct { + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHeldAmountRequest) Reset() { *m = GetHeldAmountRequest{} } +func (m *GetHeldAmountRequest) String() string { return proto.CompactTextString(m) } +func (*GetHeldAmountRequest) ProtoMessage() {} +func (*GetHeldAmountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c5d9f6e3ee97993, []int{0} +} +func (m *GetHeldAmountRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHeldAmountRequest.Unmarshal(m, b) +} +func (m *GetHeldAmountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHeldAmountRequest.Marshal(b, m, deterministic) +} +func (m *GetHeldAmountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHeldAmountRequest.Merge(m, src) +} +func (m *GetHeldAmountRequest) XXX_Size() int { + return xxx_messageInfo_GetHeldAmountRequest.Size(m) +} +func (m *GetHeldAmountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHeldAmountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHeldAmountRequest proto.InternalMessageInfo + +func (m *GetHeldAmountRequest) GetPeriod() 
time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + +type GetHeldAmountResponse struct { + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + Codes string `protobuf:"bytes,4,opt,name=codes,proto3" json:"codes,omitempty"` + UsageAtRest float32 `protobuf:"fixed32,5,opt,name=usage_at_rest,json=usageAtRest,proto3" json:"usage_at_rest,omitempty"` + UsageGet int64 `protobuf:"varint,6,opt,name=usage_get,json=usageGet,proto3" json:"usage_get,omitempty"` + UsagePut int64 `protobuf:"varint,7,opt,name=usage_put,json=usagePut,proto3" json:"usage_put,omitempty"` + UsageGetRepair int64 `protobuf:"varint,8,opt,name=usage_get_repair,json=usageGetRepair,proto3" json:"usage_get_repair,omitempty"` + UsagePutRepair int64 `protobuf:"varint,9,opt,name=usage_put_repair,json=usagePutRepair,proto3" json:"usage_put_repair,omitempty"` + UsageGetAudit int64 `protobuf:"varint,10,opt,name=usage_get_audit,json=usageGetAudit,proto3" json:"usage_get_audit,omitempty"` + CompAtRest int64 `protobuf:"varint,11,opt,name=comp_at_rest,json=compAtRest,proto3" json:"comp_at_rest,omitempty"` + CompGet int64 `protobuf:"varint,12,opt,name=comp_get,json=compGet,proto3" json:"comp_get,omitempty"` + CompPut int64 `protobuf:"varint,13,opt,name=comp_put,json=compPut,proto3" json:"comp_put,omitempty"` + CompGetRepair int64 `protobuf:"varint,14,opt,name=comp_get_repair,json=compGetRepair,proto3" json:"comp_get_repair,omitempty"` + CompPutRepair int64 `protobuf:"varint,15,opt,name=comp_put_repair,json=compPutRepair,proto3" json:"comp_put_repair,omitempty"` + CompGetAudit int64 `protobuf:"varint,16,opt,name=comp_get_audit,json=compGetAudit,proto3" json:"comp_get_audit,omitempty"` + SurgePercent int64 
`protobuf:"varint,17,opt,name=surge_percent,json=surgePercent,proto3" json:"surge_percent,omitempty"` + Held int64 `protobuf:"varint,18,opt,name=held,proto3" json:"held,omitempty"` + Owed int64 `protobuf:"varint,19,opt,name=owed,proto3" json:"owed,omitempty"` + Disposed int64 `protobuf:"varint,20,opt,name=disposed,proto3" json:"disposed,omitempty"` + Paid int64 `protobuf:"varint,21,opt,name=paid,proto3" json:"paid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHeldAmountResponse) Reset() { *m = GetHeldAmountResponse{} } +func (m *GetHeldAmountResponse) String() string { return proto.CompactTextString(m) } +func (*GetHeldAmountResponse) ProtoMessage() {} +func (*GetHeldAmountResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c5d9f6e3ee97993, []int{1} +} +func (m *GetHeldAmountResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHeldAmountResponse.Unmarshal(m, b) +} +func (m *GetHeldAmountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHeldAmountResponse.Marshal(b, m, deterministic) +} +func (m *GetHeldAmountResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHeldAmountResponse.Merge(m, src) +} +func (m *GetHeldAmountResponse) XXX_Size() int { + return xxx_messageInfo_GetHeldAmountResponse.Size(m) +} +func (m *GetHeldAmountResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHeldAmountResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHeldAmountResponse proto.InternalMessageInfo + +func (m *GetHeldAmountResponse) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + +func (m *GetHeldAmountResponse) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *GetHeldAmountResponse) GetCodes() string { + if m != nil { + return m.Codes + } + return "" +} + +func (m *GetHeldAmountResponse) 
GetUsageAtRest() float32 { + if m != nil { + return m.UsageAtRest + } + return 0 +} + +func (m *GetHeldAmountResponse) GetUsageGet() int64 { + if m != nil { + return m.UsageGet + } + return 0 +} + +func (m *GetHeldAmountResponse) GetUsagePut() int64 { + if m != nil { + return m.UsagePut + } + return 0 +} + +func (m *GetHeldAmountResponse) GetUsageGetRepair() int64 { + if m != nil { + return m.UsageGetRepair + } + return 0 +} + +func (m *GetHeldAmountResponse) GetUsagePutRepair() int64 { + if m != nil { + return m.UsagePutRepair + } + return 0 +} + +func (m *GetHeldAmountResponse) GetUsageGetAudit() int64 { + if m != nil { + return m.UsageGetAudit + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompAtRest() int64 { + if m != nil { + return m.CompAtRest + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompGet() int64 { + if m != nil { + return m.CompGet + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompPut() int64 { + if m != nil { + return m.CompPut + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompGetRepair() int64 { + if m != nil { + return m.CompGetRepair + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompPutRepair() int64 { + if m != nil { + return m.CompPutRepair + } + return 0 +} + +func (m *GetHeldAmountResponse) GetCompGetAudit() int64 { + if m != nil { + return m.CompGetAudit + } + return 0 +} + +func (m *GetHeldAmountResponse) GetSurgePercent() int64 { + if m != nil { + return m.SurgePercent + } + return 0 +} + +func (m *GetHeldAmountResponse) GetHeld() int64 { + if m != nil { + return m.Held + } + return 0 +} + +func (m *GetHeldAmountResponse) GetOwed() int64 { + if m != nil { + return m.Owed + } + return 0 +} + +func (m *GetHeldAmountResponse) GetDisposed() int64 { + if m != nil { + return m.Disposed + } + return 0 +} + +func (m *GetHeldAmountResponse) GetPaid() int64 { + if m != nil { + return m.Paid + } + return 0 +} + +type GetAllPaystubsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllPaystubsRequest) Reset() { *m = GetAllPaystubsRequest{} } +func (m *GetAllPaystubsRequest) String() string { return proto.CompactTextString(m) } +func (*GetAllPaystubsRequest) ProtoMessage() {} +func (*GetAllPaystubsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5c5d9f6e3ee97993, []int{2} +} +func (m *GetAllPaystubsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAllPaystubsRequest.Unmarshal(m, b) +} +func (m *GetAllPaystubsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAllPaystubsRequest.Marshal(b, m, deterministic) +} +func (m *GetAllPaystubsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllPaystubsRequest.Merge(m, src) +} +func (m *GetAllPaystubsRequest) XXX_Size() int { + return xxx_messageInfo_GetAllPaystubsRequest.Size(m) +} +func (m *GetAllPaystubsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllPaystubsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAllPaystubsRequest proto.InternalMessageInfo + +type GetAllPaystubsResponse struct { + Paystub []*GetHeldAmountResponse `protobuf:"bytes,1,rep,name=paystub,proto3" json:"paystub,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAllPaystubsResponse) Reset() { *m = GetAllPaystubsResponse{} } +func (m *GetAllPaystubsResponse) String() string { return proto.CompactTextString(m) } +func (*GetAllPaystubsResponse) ProtoMessage() {} +func (*GetAllPaystubsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c5d9f6e3ee97993, []int{3} +} +func (m *GetAllPaystubsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAllPaystubsResponse.Unmarshal(m, b) +} +func (m *GetAllPaystubsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAllPaystubsResponse.Marshal(b, 
m, deterministic) +} +func (m *GetAllPaystubsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAllPaystubsResponse.Merge(m, src) +} +func (m *GetAllPaystubsResponse) XXX_Size() int { + return xxx_messageInfo_GetAllPaystubsResponse.Size(m) +} +func (m *GetAllPaystubsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAllPaystubsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAllPaystubsResponse proto.InternalMessageInfo + +func (m *GetAllPaystubsResponse) GetPaystub() []*GetHeldAmountResponse { + if m != nil { + return m.Paystub + } + return nil +} + +func init() { + proto.RegisterType((*GetHeldAmountRequest)(nil), "heldamount.GetHeldAmountRequest") + proto.RegisterType((*GetHeldAmountResponse)(nil), "heldamount.GetHeldAmountResponse") + proto.RegisterType((*GetAllPaystubsRequest)(nil), "heldamount.GetAllPaystubsRequest") + proto.RegisterType((*GetAllPaystubsResponse)(nil), "heldamount.GetAllPaystubsResponse") +} + +func init() { proto.RegisterFile("heldamount.proto", fileDescriptor_5c5d9f6e3ee97993) } + +var fileDescriptor_5c5d9f6e3ee97993 = []byte{ + // 583 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xcb, 0x6e, 0x13, 0x3f, + 0x14, 0xc6, 0xff, 0xd3, 0x4b, 0x2e, 0x27, 0x97, 0xf6, 0x6f, 0x52, 0x30, 0x61, 0x91, 0x21, 0xa0, + 0x32, 0xab, 0x44, 0x2a, 0x4b, 0xd8, 0xa4, 0x20, 0x95, 0x6e, 0x50, 0x18, 0x8a, 0x90, 0xd8, 0x44, + 0x93, 0xf8, 0x30, 0x0c, 0xca, 0xc4, 0x66, 0x6c, 0x0b, 0xf5, 0x2d, 0x78, 0x1c, 0x1e, 0x81, 0x67, + 0x60, 0x51, 0xde, 0x04, 0x21, 0xdb, 0xe3, 0x4c, 0x08, 0xa8, 0x08, 0xb1, 0xb3, 0xbf, 0xf3, 0x3b, + 0x9f, 0xcf, 0x97, 0xcc, 0x81, 0xc3, 0x77, 0xb8, 0x64, 0x49, 0xce, 0xf5, 0x4a, 0x8d, 0x44, 0xc1, + 0x15, 0x27, 0x50, 0x29, 0x7d, 0x48, 0x79, 0xca, 0x9d, 0xde, 0x1f, 0xa4, 0x9c, 0xa7, 0x4b, 0x1c, + 0xdb, 0xdb, 0x5c, 0xbf, 0x1d, 0xab, 0x2c, 0x47, 0xa9, 0x92, 0x5c, 0x38, 0x60, 0x78, 0x01, 0xbd, + 0x33, 0x54, 0xcf, 0x70, 0xc9, 0x26, 0xb6, 0x3b, 0xc6, 0x0f, 0x1a, 0xa5, 0x22, 
0x8f, 0xa1, 0x26, + 0xb0, 0xc8, 0x38, 0xa3, 0x41, 0x18, 0x44, 0xad, 0x93, 0xfe, 0xc8, 0x39, 0x8d, 0xbc, 0xd3, 0xe8, + 0xc2, 0x3b, 0x9d, 0x36, 0xbe, 0x5c, 0x0d, 0xfe, 0xfb, 0xf4, 0x6d, 0x10, 0xc4, 0x65, 0xcf, 0xf0, + 0xfb, 0x3e, 0x1c, 0x6d, 0xd9, 0x4a, 0xc1, 0x57, 0x12, 0xff, 0xcd, 0x97, 0x3c, 0x80, 0xfa, 0x8a, + 0x33, 0x9c, 0x65, 0x8c, 0xee, 0x84, 0x41, 0xd4, 0x3e, 0xed, 0x1a, 0xe4, 0xeb, 0xd5, 0xa0, 0xf6, + 0x9c, 0x33, 0x3c, 0x7f, 0x1a, 0xd7, 0x4c, 0xf9, 0x9c, 0x91, 0x27, 0x00, 0x8b, 0x02, 0x13, 0x85, + 0x6c, 0x96, 0x28, 0xba, 0xfb, 0x17, 0x4f, 0x35, 0xcb, 0xbe, 0x89, 0x22, 0x3d, 0xd8, 0x5f, 0x70, + 0x86, 0x92, 0xee, 0x85, 0x41, 0xd4, 0x8c, 0xdd, 0x85, 0x0c, 0xa1, 0xa3, 0x65, 0x92, 0xe2, 0x2c, + 0x51, 0xb3, 0x02, 0xa5, 0xa2, 0xfb, 0x61, 0x10, 0xed, 0xc4, 0x2d, 0x2b, 0x4e, 0x4c, 0x52, 0x45, + 0xee, 0x40, 0xd3, 0x31, 0x29, 0x2a, 0x5a, 0x0b, 0x83, 0x68, 0x37, 0x6e, 0x58, 0xe1, 0x0c, 0x37, + 0x8a, 0x42, 0x2b, 0x5a, 0xdf, 0x28, 0x4e, 0xb5, 0x22, 0x11, 0x1c, 0xae, 0x3b, 0x67, 0x05, 0x8a, + 0x24, 0x2b, 0x68, 0xc3, 0x32, 0x5d, 0x6f, 0x10, 0x5b, 0xb5, 0x22, 0x85, 0x5e, 0x93, 0xcd, 0x0d, + 0x72, 0xaa, 0x3d, 0x79, 0x0c, 0x07, 0x95, 0x67, 0xa2, 0x59, 0xa6, 0x28, 0x58, 0xb0, 0xe3, 0x2d, + 0x27, 0x46, 0x24, 0x21, 0xb4, 0x17, 0x3c, 0x17, 0xeb, 0x60, 0x2d, 0x0b, 0x81, 0xd1, 0xca, 0x5c, + 0xb7, 0xa1, 0x61, 0x09, 0x13, 0xab, 0x6d, 0xab, 0x75, 0x73, 0x37, 0xa9, 0x7c, 0xc9, 0x84, 0xea, + 0x54, 0x25, 0x93, 0xe9, 0x18, 0x0e, 0x7c, 0x97, 0x1f, 0xb4, 0xeb, 0xde, 0x2f, 0x9b, 0xab, 0x39, + 0xbd, 0x85, 0xe7, 0x0e, 0x2a, 0xae, 0xca, 0x73, 0x1f, 0xba, 0x6b, 0x3f, 0x17, 0xe7, 0xd0, 0x62, + 0xed, 0xd2, 0xce, 0xa5, 0xb9, 0x07, 0x1d, 0xa9, 0x0b, 0xf3, 0xfb, 0x60, 0xb1, 0xc0, 0x95, 0xa2, + 0xff, 0x3b, 0xc8, 0x8a, 0x53, 0xa7, 0x11, 0x02, 0x7b, 0x66, 0x73, 0x28, 0xb1, 0x35, 0x7b, 0x36, + 0x1a, 0xff, 0x88, 0x8c, 0xde, 0x70, 0x9a, 0x39, 0x93, 0x3e, 0x34, 0x58, 0x26, 0x05, 0x97, 0xc8, + 0x68, 0xcf, 0xfd, 0x65, 0xfe, 0x6e, 0x78, 0x91, 0x64, 0x8c, 0x1e, 0x39, 0xde, 0x9c, 0x87, 0xb7, + 0xec, 0xf7, 0x3f, 
0x59, 0x2e, 0xa7, 0xc9, 0xa5, 0x54, 0x7a, 0x2e, 0xcb, 0xbd, 0x1a, 0xbe, 0x82, + 0x9b, 0xdb, 0x85, 0x72, 0x33, 0x1e, 0x41, 0x5d, 0x38, 0x8d, 0x06, 0xe1, 0x6e, 0xd4, 0x3a, 0xb9, + 0x3b, 0xda, 0x58, 0xf3, 0xdf, 0x6e, 0x53, 0xec, 0x3b, 0x4e, 0x3e, 0x07, 0x00, 0x55, 0x9d, 0xbc, + 0x00, 0x38, 0x43, 0x35, 0x4d, 0x2e, 0x5f, 0x2a, 0x3d, 0x27, 0xe1, 0x35, 0x46, 0x76, 0xaa, 0xfe, + 0x9f, 0x9f, 0x22, 0xaf, 0xa1, 0xfb, 0xf3, 0xe0, 0x64, 0xbb, 0xe9, 0xd7, 0xb4, 0xfd, 0xe1, 0x75, + 0x88, 0x33, 0x3e, 0xed, 0xbd, 0x21, 0x52, 0xf1, 0xe2, 0xfd, 0x28, 0xe3, 0xe3, 0x05, 0xcf, 0x73, + 0xbe, 0x1a, 0x8b, 0xf9, 0xbc, 0x66, 0x97, 0xf4, 0xe1, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd5, + 0xec, 0x68, 0xae, 0xeb, 0x04, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCHeldAmountClient interface { + DRPCConn() drpc.Conn + + GetPayStub(ctx context.Context, in *GetHeldAmountRequest) (*GetHeldAmountResponse, error) + GetAllPaystubs(ctx context.Context, in *GetAllPaystubsRequest) (*GetAllPaystubsResponse, error) +} + +type drpcHeldAmountClient struct { + cc drpc.Conn +} + +func NewDRPCHeldAmountClient(cc drpc.Conn) DRPCHeldAmountClient { + return &drpcHeldAmountClient{cc} +} + +func (c *drpcHeldAmountClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcHeldAmountClient) GetPayStub(ctx context.Context, in *GetHeldAmountRequest) (*GetHeldAmountResponse, error) { + out := new(GetHeldAmountResponse) + err := c.cc.Invoke(ctx, "/heldamount.HeldAmount/GetPayStub", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcHeldAmountClient) GetAllPaystubs(ctx context.Context, in *GetAllPaystubsRequest) (*GetAllPaystubsResponse, error) { + out := new(GetAllPaystubsResponse) + err := c.cc.Invoke(ctx, "/heldamount.HeldAmount/GetAllPaystubs", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCHeldAmountServer interface { + GetPayStub(context.Context, *GetHeldAmountRequest) (*GetHeldAmountResponse, error) + GetAllPaystubs(context.Context, 
*GetAllPaystubsRequest) (*GetAllPaystubsResponse, error) +} + +type DRPCHeldAmountDescription struct{} + +func (DRPCHeldAmountDescription) NumMethods() int { return 2 } + +func (DRPCHeldAmountDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/heldamount.HeldAmount/GetPayStub", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCHeldAmountServer). + GetPayStub( + ctx, + in1.(*GetHeldAmountRequest), + ) + }, DRPCHeldAmountServer.GetPayStub, true + case 1: + return "/heldamount.HeldAmount/GetAllPaystubs", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCHeldAmountServer). + GetAllPaystubs( + ctx, + in1.(*GetAllPaystubsRequest), + ) + }, DRPCHeldAmountServer.GetAllPaystubs, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterHeldAmount(mux drpc.Mux, impl DRPCHeldAmountServer) error { + return mux.Register(impl, DRPCHeldAmountDescription{}) +} + +type DRPCHeldAmount_GetPayStubStream interface { + drpc.Stream + SendAndClose(*GetHeldAmountResponse) error +} + +type drpcHeldAmountGetPayStubStream struct { + drpc.Stream +} + +func (x *drpcHeldAmountGetPayStubStream) SendAndClose(m *GetHeldAmountResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCHeldAmount_GetAllPaystubsStream interface { + drpc.Stream + SendAndClose(*GetAllPaystubsResponse) error +} + +type drpcHeldAmountGetAllPaystubsStream struct { + drpc.Stream +} + +func (x *drpcHeldAmountGetAllPaystubsStream) SendAndClose(m *GetAllPaystubsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/heldamount.proto b/vendor/storj.io/common/pb/heldamount.proto new file mode 100644 index 000000000..49810ac38 --- /dev/null +++ b/vendor/storj.io/common/pb/heldamount.proto @@ 
-0,0 +1,49 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +package heldamount; + +service HeldAmount { + rpc GetPayStub(GetHeldAmountRequest) returns (GetHeldAmountResponse); + rpc GetAllPaystubs(GetAllPaystubsRequest) returns (GetAllPaystubsResponse); +} + +message GetHeldAmountRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message GetHeldAmountResponse { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + string codes = 4; + float usage_at_rest = 5; + int64 usage_get = 6; + int64 usage_put = 7; + int64 usage_get_repair = 8; + int64 usage_put_repair = 9; + int64 usage_get_audit = 10; + int64 comp_at_rest = 11; + int64 comp_get = 12; + int64 comp_put = 13; + int64 comp_get_repair = 14; + int64 comp_put_repair = 15; + int64 comp_get_audit = 16; + int64 surge_percent = 17; + int64 held = 18; + int64 owed = 19; + int64 disposed = 20; + int64 paid = 21; +} + +message GetAllPaystubsRequest {} + +message GetAllPaystubsResponse { + repeated GetHeldAmountResponse paystub = 1; +} \ No newline at end of file diff --git a/vendor/storj.io/common/pb/inspector.pb.go b/vendor/storj.io/common/pb/inspector.pb.go new file mode 100644 index 000000000..0b179f632 --- /dev/null +++ b/vendor/storj.io/common/pb/inspector.pb.go @@ -0,0 +1,1348 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: inspector.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// ListSegments +type ListIrreparableSegmentsRequest struct { + Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + LastSeenSegmentPath []byte `protobuf:"bytes,2,opt,name=last_seen_segment_path,json=lastSeenSegmentPath,proto3" json:"last_seen_segment_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListIrreparableSegmentsRequest) Reset() { *m = ListIrreparableSegmentsRequest{} } +func (m *ListIrreparableSegmentsRequest) String() string { return proto.CompactTextString(m) } +func (*ListIrreparableSegmentsRequest) ProtoMessage() {} +func (*ListIrreparableSegmentsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{0} +} +func (m *ListIrreparableSegmentsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListIrreparableSegmentsRequest.Unmarshal(m, b) +} +func (m *ListIrreparableSegmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListIrreparableSegmentsRequest.Marshal(b, m, deterministic) +} +func (m *ListIrreparableSegmentsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListIrreparableSegmentsRequest.Merge(m, src) +} +func (m *ListIrreparableSegmentsRequest) XXX_Size() int { 
+ return xxx_messageInfo_ListIrreparableSegmentsRequest.Size(m) +} +func (m *ListIrreparableSegmentsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListIrreparableSegmentsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListIrreparableSegmentsRequest proto.InternalMessageInfo + +func (m *ListIrreparableSegmentsRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ListIrreparableSegmentsRequest) GetLastSeenSegmentPath() []byte { + if m != nil { + return m.LastSeenSegmentPath + } + return nil +} + +type IrreparableSegment struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + SegmentDetail *Pointer `protobuf:"bytes,2,opt,name=segment_detail,json=segmentDetail,proto3" json:"segment_detail,omitempty"` + LostPieces int32 `protobuf:"varint,3,opt,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"` + LastRepairAttempt int64 `protobuf:"varint,4,opt,name=last_repair_attempt,json=lastRepairAttempt,proto3" json:"last_repair_attempt,omitempty"` + RepairAttemptCount int64 `protobuf:"varint,5,opt,name=repair_attempt_count,json=repairAttemptCount,proto3" json:"repair_attempt_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IrreparableSegment) Reset() { *m = IrreparableSegment{} } +func (m *IrreparableSegment) String() string { return proto.CompactTextString(m) } +func (*IrreparableSegment) ProtoMessage() {} +func (*IrreparableSegment) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{1} +} +func (m *IrreparableSegment) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IrreparableSegment.Unmarshal(m, b) +} +func (m *IrreparableSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IrreparableSegment.Marshal(b, m, deterministic) +} +func (m *IrreparableSegment) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_IrreparableSegment.Merge(m, src) +} +func (m *IrreparableSegment) XXX_Size() int { + return xxx_messageInfo_IrreparableSegment.Size(m) +} +func (m *IrreparableSegment) XXX_DiscardUnknown() { + xxx_messageInfo_IrreparableSegment.DiscardUnknown(m) +} + +var xxx_messageInfo_IrreparableSegment proto.InternalMessageInfo + +func (m *IrreparableSegment) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *IrreparableSegment) GetSegmentDetail() *Pointer { + if m != nil { + return m.SegmentDetail + } + return nil +} + +func (m *IrreparableSegment) GetLostPieces() int32 { + if m != nil { + return m.LostPieces + } + return 0 +} + +func (m *IrreparableSegment) GetLastRepairAttempt() int64 { + if m != nil { + return m.LastRepairAttempt + } + return 0 +} + +func (m *IrreparableSegment) GetRepairAttemptCount() int64 { + if m != nil { + return m.RepairAttemptCount + } + return 0 +} + +type ListIrreparableSegmentsResponse struct { + Segments []*IrreparableSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListIrreparableSegmentsResponse) Reset() { *m = ListIrreparableSegmentsResponse{} } +func (m *ListIrreparableSegmentsResponse) String() string { return proto.CompactTextString(m) } +func (*ListIrreparableSegmentsResponse) ProtoMessage() {} +func (*ListIrreparableSegmentsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{2} +} +func (m *ListIrreparableSegmentsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListIrreparableSegmentsResponse.Unmarshal(m, b) +} +func (m *ListIrreparableSegmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListIrreparableSegmentsResponse.Marshal(b, m, deterministic) +} +func (m *ListIrreparableSegmentsResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ListIrreparableSegmentsResponse.Merge(m, src) +} +func (m *ListIrreparableSegmentsResponse) XXX_Size() int { + return xxx_messageInfo_ListIrreparableSegmentsResponse.Size(m) +} +func (m *ListIrreparableSegmentsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListIrreparableSegmentsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListIrreparableSegmentsResponse proto.InternalMessageInfo + +func (m *ListIrreparableSegmentsResponse) GetSegments() []*IrreparableSegment { + if m != nil { + return m.Segments + } + return nil +} + +// CountNodes +type CountNodesResponse struct { + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CountNodesResponse) Reset() { *m = CountNodesResponse{} } +func (m *CountNodesResponse) String() string { return proto.CompactTextString(m) } +func (*CountNodesResponse) ProtoMessage() {} +func (*CountNodesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{3} +} +func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CountNodesResponse.Unmarshal(m, b) +} +func (m *CountNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CountNodesResponse.Marshal(b, m, deterministic) +} +func (m *CountNodesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CountNodesResponse.Merge(m, src) +} +func (m *CountNodesResponse) XXX_Size() int { + return xxx_messageInfo_CountNodesResponse.Size(m) +} +func (m *CountNodesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CountNodesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CountNodesResponse proto.InternalMessageInfo + +func (m *CountNodesResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +type CountNodesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CountNodesRequest) Reset() { *m = CountNodesRequest{} } +func (m *CountNodesRequest) String() string { return proto.CompactTextString(m) } +func (*CountNodesRequest) ProtoMessage() {} +func (*CountNodesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{4} +} +func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CountNodesRequest.Unmarshal(m, b) +} +func (m *CountNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CountNodesRequest.Marshal(b, m, deterministic) +} +func (m *CountNodesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CountNodesRequest.Merge(m, src) +} +func (m *CountNodesRequest) XXX_Size() int { + return xxx_messageInfo_CountNodesRequest.Size(m) +} +func (m *CountNodesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CountNodesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CountNodesRequest proto.InternalMessageInfo + +type DumpNodesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpNodesRequest) Reset() { *m = DumpNodesRequest{} } +func (m *DumpNodesRequest) String() string { return proto.CompactTextString(m) } +func (*DumpNodesRequest) ProtoMessage() {} +func (*DumpNodesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{5} +} +func (m *DumpNodesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpNodesRequest.Unmarshal(m, b) +} +func (m *DumpNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpNodesRequest.Marshal(b, m, deterministic) +} +func (m *DumpNodesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpNodesRequest.Merge(m, src) +} +func (m *DumpNodesRequest) XXX_Size() int { + return xxx_messageInfo_DumpNodesRequest.Size(m) +} +func (m 
*DumpNodesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DumpNodesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpNodesRequest proto.InternalMessageInfo + +type DumpNodesResponse struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DumpNodesResponse) Reset() { *m = DumpNodesResponse{} } +func (m *DumpNodesResponse) String() string { return proto.CompactTextString(m) } +func (*DumpNodesResponse) ProtoMessage() {} +func (*DumpNodesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{6} +} +func (m *DumpNodesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DumpNodesResponse.Unmarshal(m, b) +} +func (m *DumpNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DumpNodesResponse.Marshal(b, m, deterministic) +} +func (m *DumpNodesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DumpNodesResponse.Merge(m, src) +} +func (m *DumpNodesResponse) XXX_Size() int { + return xxx_messageInfo_DumpNodesResponse.Size(m) +} +func (m *DumpNodesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DumpNodesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DumpNodesResponse proto.InternalMessageInfo + +func (m *DumpNodesResponse) GetNodes() []*Node { + if m != nil { + return m.Nodes + } + return nil +} + +type StatsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatsRequest) Reset() { *m = StatsRequest{} } +func (m *StatsRequest) String() string { return proto.CompactTextString(m) } +func (*StatsRequest) ProtoMessage() {} +func (*StatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{7} +} +func (m *StatsRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_StatsRequest.Unmarshal(m, b) +} +func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) +} +func (m *StatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatsRequest.Merge(m, src) +} +func (m *StatsRequest) XXX_Size() int { + return xxx_messageInfo_StatsRequest.Size(m) +} +func (m *StatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatsRequest proto.InternalMessageInfo + +type StatSummaryResponse struct { + UsedSpace int64 `protobuf:"varint,1,opt,name=used_space,json=usedSpace,proto3" json:"used_space,omitempty"` + AvailableSpace int64 `protobuf:"varint,2,opt,name=available_space,json=availableSpace,proto3" json:"available_space,omitempty"` + UsedIngress int64 `protobuf:"varint,3,opt,name=used_ingress,json=usedIngress,proto3" json:"used_ingress,omitempty"` + UsedEgress int64 `protobuf:"varint,4,opt,name=used_egress,json=usedEgress,proto3" json:"used_egress,omitempty"` + UsedBandwidth int64 `protobuf:"varint,5,opt,name=used_bandwidth,json=usedBandwidth,proto3" json:"used_bandwidth,omitempty"` + AvailableBandwidth int64 `protobuf:"varint,6,opt,name=available_bandwidth,json=availableBandwidth,proto3" json:"available_bandwidth,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatSummaryResponse) Reset() { *m = StatSummaryResponse{} } +func (m *StatSummaryResponse) String() string { return proto.CompactTextString(m) } +func (*StatSummaryResponse) ProtoMessage() {} +func (*StatSummaryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{8} +} +func (m *StatSummaryResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatSummaryResponse.Unmarshal(m, b) +} +func (m *StatSummaryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_StatSummaryResponse.Marshal(b, m, deterministic) +} +func (m *StatSummaryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatSummaryResponse.Merge(m, src) +} +func (m *StatSummaryResponse) XXX_Size() int { + return xxx_messageInfo_StatSummaryResponse.Size(m) +} +func (m *StatSummaryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatSummaryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatSummaryResponse proto.InternalMessageInfo + +func (m *StatSummaryResponse) GetUsedSpace() int64 { + if m != nil { + return m.UsedSpace + } + return 0 +} + +func (m *StatSummaryResponse) GetAvailableSpace() int64 { + if m != nil { + return m.AvailableSpace + } + return 0 +} + +func (m *StatSummaryResponse) GetUsedIngress() int64 { + if m != nil { + return m.UsedIngress + } + return 0 +} + +func (m *StatSummaryResponse) GetUsedEgress() int64 { + if m != nil { + return m.UsedEgress + } + return 0 +} + +func (m *StatSummaryResponse) GetUsedBandwidth() int64 { + if m != nil { + return m.UsedBandwidth + } + return 0 +} + +func (m *StatSummaryResponse) GetAvailableBandwidth() int64 { + if m != nil { + return m.AvailableBandwidth + } + return 0 +} + +type DashboardRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DashboardRequest) Reset() { *m = DashboardRequest{} } +func (m *DashboardRequest) String() string { return proto.CompactTextString(m) } +func (*DashboardRequest) ProtoMessage() {} +func (*DashboardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{9} +} +func (m *DashboardRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardRequest.Unmarshal(m, b) +} +func (m *DashboardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardRequest.Marshal(b, m, deterministic) +} +func (m *DashboardRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_DashboardRequest.Merge(m, src) +} +func (m *DashboardRequest) XXX_Size() int { + return xxx_messageInfo_DashboardRequest.Size(m) +} +func (m *DashboardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardRequest proto.InternalMessageInfo + +type DashboardResponse struct { + NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + NodeConnections int64 `protobuf:"varint,2,opt,name=node_connections,json=nodeConnections,proto3" json:"node_connections,omitempty"` + BootstrapAddress string `protobuf:"bytes,3,opt,name=bootstrap_address,json=bootstrapAddress,proto3" json:"bootstrap_address,omitempty"` // Deprecated: Do not use. + InternalAddress string `protobuf:"bytes,4,opt,name=internal_address,json=internalAddress,proto3" json:"internal_address,omitempty"` + ExternalAddress string `protobuf:"bytes,5,opt,name=external_address,json=externalAddress,proto3" json:"external_address,omitempty"` + DashboardAddress string `protobuf:"bytes,6,opt,name=dashboard_address,json=dashboardAddress,proto3" json:"dashboard_address,omitempty"` + Stats *StatSummaryResponse `protobuf:"bytes,7,opt,name=stats,proto3" json:"stats,omitempty"` + Uptime string `protobuf:"bytes,8,opt,name=uptime,proto3" json:"uptime,omitempty"` + LastPinged time.Time `protobuf:"bytes,9,opt,name=last_pinged,json=lastPinged,proto3,stdtime" json:"last_pinged"` + LastQueried time.Time `protobuf:"bytes,10,opt,name=last_queried,json=lastQueried,proto3,stdtime" json:"last_queried"` + LastPingFromId *NodeID `protobuf:"bytes,11,opt,name=last_ping_from_id,json=lastPingFromId,proto3,customtype=NodeID" json:"last_ping_from_id,omitempty"` + LastPingFromAddress string `protobuf:"bytes,12,opt,name=last_ping_from_address,json=lastPingFromAddress,proto3" json:"last_ping_from_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *DashboardResponse) Reset() { *m = DashboardResponse{} } +func (m *DashboardResponse) String() string { return proto.CompactTextString(m) } +func (*DashboardResponse) ProtoMessage() {} +func (*DashboardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{10} +} +func (m *DashboardResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DashboardResponse.Unmarshal(m, b) +} +func (m *DashboardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DashboardResponse.Marshal(b, m, deterministic) +} +func (m *DashboardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DashboardResponse.Merge(m, src) +} +func (m *DashboardResponse) XXX_Size() int { + return xxx_messageInfo_DashboardResponse.Size(m) +} +func (m *DashboardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DashboardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DashboardResponse proto.InternalMessageInfo + +func (m *DashboardResponse) GetNodeConnections() int64 { + if m != nil { + return m.NodeConnections + } + return 0 +} + +// Deprecated: Do not use. 
+func (m *DashboardResponse) GetBootstrapAddress() string { + if m != nil { + return m.BootstrapAddress + } + return "" +} + +func (m *DashboardResponse) GetInternalAddress() string { + if m != nil { + return m.InternalAddress + } + return "" +} + +func (m *DashboardResponse) GetExternalAddress() string { + if m != nil { + return m.ExternalAddress + } + return "" +} + +func (m *DashboardResponse) GetDashboardAddress() string { + if m != nil { + return m.DashboardAddress + } + return "" +} + +func (m *DashboardResponse) GetStats() *StatSummaryResponse { + if m != nil { + return m.Stats + } + return nil +} + +func (m *DashboardResponse) GetUptime() string { + if m != nil { + return m.Uptime + } + return "" +} + +func (m *DashboardResponse) GetLastPinged() time.Time { + if m != nil { + return m.LastPinged + } + return time.Time{} +} + +func (m *DashboardResponse) GetLastQueried() time.Time { + if m != nil { + return m.LastQueried + } + return time.Time{} +} + +func (m *DashboardResponse) GetLastPingFromAddress() string { + if m != nil { + return m.LastPingFromAddress + } + return "" +} + +type SegmentHealthRequest struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + SegmentIndex int64 `protobuf:"varint,3,opt,name=segment_index,json=segmentIndex,proto3" json:"segment_index,omitempty"` + ProjectId []byte `protobuf:"bytes,4,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentHealthRequest) Reset() { *m = SegmentHealthRequest{} } +func (m *SegmentHealthRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentHealthRequest) ProtoMessage() {} +func (*SegmentHealthRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_a07d9034b2dd9d26, []int{11} +} +func (m *SegmentHealthRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentHealthRequest.Unmarshal(m, b) +} +func (m *SegmentHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentHealthRequest.Marshal(b, m, deterministic) +} +func (m *SegmentHealthRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentHealthRequest.Merge(m, src) +} +func (m *SegmentHealthRequest) XXX_Size() int { + return xxx_messageInfo_SegmentHealthRequest.Size(m) +} +func (m *SegmentHealthRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentHealthRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentHealthRequest proto.InternalMessageInfo + +func (m *SegmentHealthRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SegmentHealthRequest) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *SegmentHealthRequest) GetSegmentIndex() int64 { + if m != nil { + return m.SegmentIndex + } + return 0 +} + +func (m *SegmentHealthRequest) GetProjectId() []byte { + if m != nil { + return m.ProjectId + } + return nil +} + +type SegmentHealth struct { + HealthyIds []NodeID `protobuf:"bytes,1,rep,name=healthy_ids,json=healthyIds,proto3,customtype=NodeID" json:"healthy_ids,omitempty"` + UnhealthyIds []NodeID `protobuf:"bytes,2,rep,name=unhealthy_ids,json=unhealthyIds,proto3,customtype=NodeID" json:"unhealthy_ids,omitempty"` + OfflineIds []NodeID `protobuf:"bytes,3,rep,name=offline_ids,json=offlineIds,proto3,customtype=NodeID" json:"offline_ids,omitempty"` + Segment []byte `protobuf:"bytes,4,opt,name=segment,proto3" json:"segment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentHealth) Reset() { *m = SegmentHealth{} } +func (m *SegmentHealth) String() string { return 
proto.CompactTextString(m) } +func (*SegmentHealth) ProtoMessage() {} +func (*SegmentHealth) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{12} +} +func (m *SegmentHealth) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentHealth.Unmarshal(m, b) +} +func (m *SegmentHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentHealth.Marshal(b, m, deterministic) +} +func (m *SegmentHealth) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentHealth.Merge(m, src) +} +func (m *SegmentHealth) XXX_Size() int { + return xxx_messageInfo_SegmentHealth.Size(m) +} +func (m *SegmentHealth) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentHealth.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentHealth proto.InternalMessageInfo + +func (m *SegmentHealth) GetSegment() []byte { + if m != nil { + return m.Segment + } + return nil +} + +type SegmentHealthResponse struct { + Health *SegmentHealth `protobuf:"bytes,1,opt,name=health,proto3" json:"health,omitempty"` + Redundancy *RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentHealthResponse) Reset() { *m = SegmentHealthResponse{} } +func (m *SegmentHealthResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentHealthResponse) ProtoMessage() {} +func (*SegmentHealthResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{13} +} +func (m *SegmentHealthResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentHealthResponse.Unmarshal(m, b) +} +func (m *SegmentHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentHealthResponse.Marshal(b, m, deterministic) +} +func (m *SegmentHealthResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SegmentHealthResponse.Merge(m, src) +} +func (m *SegmentHealthResponse) XXX_Size() int { + return xxx_messageInfo_SegmentHealthResponse.Size(m) +} +func (m *SegmentHealthResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentHealthResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentHealthResponse proto.InternalMessageInfo + +func (m *SegmentHealthResponse) GetHealth() *SegmentHealth { + if m != nil { + return m.Health + } + return nil +} + +func (m *SegmentHealthResponse) GetRedundancy() *RedundancyScheme { + if m != nil { + return m.Redundancy + } + return nil +} + +type ObjectHealthRequest struct { + EncryptedPath []byte `protobuf:"bytes,1,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Bucket []byte `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` + ProjectId []byte `protobuf:"bytes,3,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + StartAfterSegment int64 `protobuf:"varint,4,opt,name=start_after_segment,json=startAfterSegment,proto3" json:"start_after_segment,omitempty"` + EndBeforeSegment int64 `protobuf:"varint,5,opt,name=end_before_segment,json=endBeforeSegment,proto3" json:"end_before_segment,omitempty"` + Limit int32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectHealthRequest) Reset() { *m = ObjectHealthRequest{} } +func (m *ObjectHealthRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectHealthRequest) ProtoMessage() {} +func (*ObjectHealthRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{14} +} +func (m *ObjectHealthRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectHealthRequest.Unmarshal(m, b) +} +func (m *ObjectHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ObjectHealthRequest.Marshal(b, m, deterministic) +} +func (m *ObjectHealthRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectHealthRequest.Merge(m, src) +} +func (m *ObjectHealthRequest) XXX_Size() int { + return xxx_messageInfo_ObjectHealthRequest.Size(m) +} +func (m *ObjectHealthRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectHealthRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectHealthRequest proto.InternalMessageInfo + +func (m *ObjectHealthRequest) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectHealthRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectHealthRequest) GetProjectId() []byte { + if m != nil { + return m.ProjectId + } + return nil +} + +func (m *ObjectHealthRequest) GetStartAfterSegment() int64 { + if m != nil { + return m.StartAfterSegment + } + return 0 +} + +func (m *ObjectHealthRequest) GetEndBeforeSegment() int64 { + if m != nil { + return m.EndBeforeSegment + } + return 0 +} + +func (m *ObjectHealthRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +type ObjectHealthResponse struct { + Segments []*SegmentHealth `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"` + Redundancy *RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectHealthResponse) Reset() { *m = ObjectHealthResponse{} } +func (m *ObjectHealthResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectHealthResponse) ProtoMessage() {} +func (*ObjectHealthResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a07d9034b2dd9d26, []int{15} +} +func (m *ObjectHealthResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectHealthResponse.Unmarshal(m, b) +} +func (m 
*ObjectHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectHealthResponse.Marshal(b, m, deterministic) +} +func (m *ObjectHealthResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectHealthResponse.Merge(m, src) +} +func (m *ObjectHealthResponse) XXX_Size() int { + return xxx_messageInfo_ObjectHealthResponse.Size(m) +} +func (m *ObjectHealthResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectHealthResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectHealthResponse proto.InternalMessageInfo + +func (m *ObjectHealthResponse) GetSegments() []*SegmentHealth { + if m != nil { + return m.Segments + } + return nil +} + +func (m *ObjectHealthResponse) GetRedundancy() *RedundancyScheme { + if m != nil { + return m.Redundancy + } + return nil +} + +func init() { + proto.RegisterType((*ListIrreparableSegmentsRequest)(nil), "inspector.ListIrreparableSegmentsRequest") + proto.RegisterType((*IrreparableSegment)(nil), "inspector.IrreparableSegment") + proto.RegisterType((*ListIrreparableSegmentsResponse)(nil), "inspector.ListIrreparableSegmentsResponse") + proto.RegisterType((*CountNodesResponse)(nil), "inspector.CountNodesResponse") + proto.RegisterType((*CountNodesRequest)(nil), "inspector.CountNodesRequest") + proto.RegisterType((*DumpNodesRequest)(nil), "inspector.DumpNodesRequest") + proto.RegisterType((*DumpNodesResponse)(nil), "inspector.DumpNodesResponse") + proto.RegisterType((*StatsRequest)(nil), "inspector.StatsRequest") + proto.RegisterType((*StatSummaryResponse)(nil), "inspector.StatSummaryResponse") + proto.RegisterType((*DashboardRequest)(nil), "inspector.DashboardRequest") + proto.RegisterType((*DashboardResponse)(nil), "inspector.DashboardResponse") + proto.RegisterType((*SegmentHealthRequest)(nil), "inspector.SegmentHealthRequest") + proto.RegisterType((*SegmentHealth)(nil), "inspector.SegmentHealth") + proto.RegisterType((*SegmentHealthResponse)(nil), "inspector.SegmentHealthResponse") 
+ proto.RegisterType((*ObjectHealthRequest)(nil), "inspector.ObjectHealthRequest") + proto.RegisterType((*ObjectHealthResponse)(nil), "inspector.ObjectHealthResponse") +} + +func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) } + +var fileDescriptor_a07d9034b2dd9d26 = []byte{ + // 1247 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x8e, 0x1b, 0x45, + 0x10, 0xce, 0xac, 0xd7, 0x4e, 0x5c, 0xf6, 0xfe, 0xb8, 0x6d, 0x92, 0x91, 0xf3, 0xe3, 0x65, 0x10, + 0xca, 0x26, 0x41, 0xde, 0x68, 0x93, 0x1c, 0x22, 0x4e, 0x71, 0x7e, 0x88, 0x25, 0x44, 0x36, 0xe3, + 0x9c, 0x10, 0xd2, 0xa8, 0xed, 0x6e, 0xdb, 0x93, 0x78, 0xba, 0x27, 0x3d, 0xed, 0x90, 0x7d, 0x01, + 0x04, 0x37, 0xae, 0x9c, 0x11, 0x6f, 0xc0, 0x89, 0x27, 0xe0, 0x19, 0x38, 0x84, 0x13, 0xf0, 0x0c, + 0xdc, 0x50, 0xff, 0x4c, 0x7b, 0x6c, 0xaf, 0xf9, 0x11, 0xb7, 0x99, 0xaf, 0xbe, 0xaa, 0xa9, 0xfe, + 0xaa, 0xba, 0x6a, 0x60, 0x2f, 0x66, 0x59, 0x4a, 0x47, 0x92, 0x8b, 0x6e, 0x2a, 0xb8, 0xe4, 0xa8, + 0xea, 0x80, 0x36, 0x4c, 0xf8, 0x84, 0x1b, 0xb8, 0x0d, 0x8c, 0x13, 0x6a, 0x9f, 0xf7, 0x52, 0x1e, + 0x33, 0x49, 0x05, 0x19, 0x5a, 0xa0, 0x33, 0xe1, 0x7c, 0x32, 0xa3, 0x47, 0xfa, 0x6d, 0x38, 0x1f, + 0x1f, 0xc9, 0x38, 0xa1, 0x99, 0xc4, 0x49, 0x6a, 0x08, 0xc1, 0x2b, 0xb8, 0xf6, 0x69, 0x9c, 0xc9, + 0xbe, 0x10, 0x34, 0xc5, 0x02, 0x0f, 0x67, 0x74, 0x40, 0x27, 0x09, 0x65, 0x32, 0x0b, 0xe9, 0xeb, + 0x39, 0xcd, 0x24, 0x6a, 0x41, 0x79, 0x16, 0x27, 0xb1, 0xf4, 0xbd, 0x03, 0xef, 0xb0, 0x1c, 0x9a, + 0x17, 0x74, 0x07, 0x2e, 0xce, 0x70, 0x26, 0xa3, 0x8c, 0x52, 0x16, 0x65, 0xc6, 0x25, 0x4a, 0xb1, + 0x9c, 0xfa, 0x5b, 0x07, 0xde, 0x61, 0x3d, 0x6c, 0x2a, 0xeb, 0x80, 0x52, 0x66, 0xc3, 0x9d, 0x60, + 0x39, 0x0d, 0x7e, 0xf7, 0x00, 0xad, 0x7f, 0x09, 0x21, 0xd8, 0xd6, 0x9e, 0x9e, 0xf6, 0xd4, 0xcf, + 0xe8, 0x3e, 0xec, 0xe6, 0x51, 0x09, 0x95, 0x38, 0x9e, 0xe9, 0xb8, 0xb5, 0x63, 0xd4, 0x5d, 0x1c, + 0xf1, 0xc4, 0x3c, 0x85, 0x3b, 0x96, 0xf9, 0x48, 0x13, 0x51, 0x07, 
0x6a, 0x33, 0x9e, 0xc9, 0x28, + 0x8d, 0xe9, 0x88, 0x66, 0x7e, 0x49, 0xa7, 0x0d, 0x0a, 0x3a, 0xd1, 0x08, 0xea, 0x82, 0xce, 0x2e, + 0x52, 0x89, 0xc4, 0x22, 0xc2, 0x52, 0xd2, 0x24, 0x95, 0xfe, 0xf6, 0x81, 0x77, 0x58, 0x0a, 0x1b, + 0xca, 0x14, 0x6a, 0xcb, 0x03, 0x63, 0x40, 0xb7, 0xa1, 0xb5, 0x4c, 0x8d, 0x46, 0x7c, 0xce, 0xa4, + 0x5f, 0xd6, 0x0e, 0x48, 0x14, 0xc9, 0x0f, 0x95, 0x25, 0xf8, 0x02, 0x3a, 0x1b, 0x55, 0xcd, 0x52, + 0xce, 0x32, 0x8a, 0xee, 0xc3, 0x05, 0x9b, 0x76, 0xe6, 0x7b, 0x07, 0xa5, 0xc3, 0xda, 0xf1, 0xd5, + 0xee, 0xa2, 0xe2, 0xeb, 0x9e, 0xa1, 0xa3, 0x07, 0x37, 0x01, 0xe9, 0xcf, 0x7c, 0xc6, 0x09, 0x5d, + 0x04, 0x6c, 0x41, 0xd9, 0xa4, 0xe5, 0xe9, 0xb4, 0xcc, 0x4b, 0xd0, 0x84, 0x46, 0x91, 0xab, 0x4b, + 0x1a, 0x20, 0xd8, 0x7f, 0x34, 0x4f, 0xd2, 0x25, 0xec, 0x1e, 0x34, 0x0a, 0x98, 0x8d, 0x79, 0x00, + 0x65, 0xd5, 0x5d, 0x79, 0x86, 0xd0, 0xd5, 0xbd, 0xa6, 0x38, 0xa1, 0x31, 0x04, 0xbb, 0x50, 0x1f, + 0x48, 0xec, 0xba, 0x25, 0xf8, 0xd3, 0x83, 0xa6, 0x02, 0x06, 0xf3, 0x24, 0xc1, 0xe2, 0xd4, 0x45, + 0xba, 0x0a, 0x30, 0xcf, 0x28, 0x89, 0xb2, 0x14, 0x8f, 0xa8, 0x4d, 0xb1, 0xaa, 0x90, 0x81, 0x02, + 0xd0, 0x75, 0xd8, 0xc3, 0x6f, 0x70, 0x3c, 0x53, 0x07, 0xb6, 0x9c, 0x2d, 0xcd, 0xd9, 0x75, 0xb0, + 0x21, 0xbe, 0x0f, 0x75, 0x1d, 0x27, 0x66, 0x13, 0x41, 0x33, 0x53, 0xdd, 0x52, 0x58, 0x53, 0x58, + 0xdf, 0x40, 0xaa, 0xfe, 0x9a, 0x42, 0x0d, 0xc3, 0x94, 0x55, 0x7f, 0xfd, 0xb1, 0x21, 0x7c, 0x08, + 0xbb, 0x9a, 0x30, 0xc4, 0x8c, 0x7c, 0x19, 0x13, 0x39, 0xb5, 0x95, 0xdc, 0x51, 0x68, 0x2f, 0x07, + 0xd1, 0x11, 0x34, 0x17, 0x39, 0x2d, 0xb8, 0x15, 0x53, 0x75, 0x67, 0x72, 0x0e, 0x5a, 0x56, 0x9c, + 0x4d, 0x87, 0x1c, 0x0b, 0x92, 0xeb, 0xf1, 0xdb, 0x36, 0x34, 0x0a, 0xa0, 0x55, 0xe3, 0x3a, 0x9c, + 0x57, 0xf2, 0x45, 0x31, 0x31, 0x4d, 0xdf, 0xdb, 0xfd, 0xf9, 0x5d, 0xe7, 0xdc, 0x2f, 0xef, 0x3a, + 0x15, 0xa5, 0x6d, 0xff, 0x51, 0x58, 0x51, 0xe6, 0x3e, 0x41, 0x37, 0x60, 0x5f, 0x13, 0x47, 0x9c, + 0x31, 0x3a, 0x92, 0x31, 0x67, 0x99, 0x15, 0x66, 0x4f, 0xe1, 0x0f, 0x17, 0x30, 0x3a, 0x82, 0xc6, + 0x90, 
0x73, 0x99, 0x49, 0x81, 0xd3, 0x08, 0x13, 0xe2, 0xe4, 0xa9, 0xf6, 0xb6, 0x7c, 0x2f, 0xdc, + 0x77, 0xc6, 0x07, 0xc6, 0xa6, 0x62, 0xeb, 0xfb, 0xc3, 0xf0, 0xcc, 0xf1, 0x95, 0x58, 0xd5, 0x70, + 0x2f, 0xc7, 0x0b, 0x54, 0xfa, 0x76, 0x85, 0x5a, 0x36, 0xd4, 0x1c, 0xcf, 0xa9, 0xb7, 0xa0, 0x41, + 0xf2, 0xf3, 0x3a, 0x6e, 0x45, 0x73, 0xf7, 0x9d, 0x21, 0x27, 0xdf, 0x85, 0x72, 0xa6, 0xba, 0xc7, + 0x3f, 0xaf, 0x2f, 0xf7, 0xb5, 0xc2, 0x0d, 0x38, 0xa3, 0x89, 0x42, 0x43, 0x46, 0x17, 0xa1, 0x32, + 0x4f, 0xd5, 0x20, 0xf3, 0x2f, 0xe8, 0xb8, 0xf6, 0x0d, 0x3d, 0x86, 0x9a, 0xbe, 0xd7, 0x69, 0xcc, + 0x26, 0x94, 0xf8, 0x55, 0x1d, 0xb3, 0xdd, 0x35, 0x23, 0xb0, 0x9b, 0x8f, 0xc0, 0xee, 0x8b, 0x7c, + 0x04, 0xf6, 0x2e, 0x28, 0xd5, 0xbf, 0xfd, 0xb5, 0xe3, 0x85, 0xa0, 0x1c, 0x4f, 0xb4, 0x1f, 0xfa, + 0x04, 0xea, 0x3a, 0xcc, 0xeb, 0x39, 0x15, 0x31, 0x25, 0x3e, 0xfc, 0x87, 0x38, 0x3a, 0x81, 0xe7, + 0xc6, 0x11, 0xdd, 0x83, 0x86, 0xcb, 0x27, 0x1a, 0x0b, 0x9e, 0xa8, 0x7a, 0xd7, 0x74, 0xbd, 0xa1, + 0x50, 0xeb, 0xdd, 0xfc, 0xdb, 0x4f, 0x04, 0x4f, 0xfa, 0xc4, 0x8d, 0xd6, 0x85, 0x5b, 0x2e, 0x63, + 0x5d, 0x1f, 0xb7, 0x59, 0xe4, 0x5b, 0x25, 0x83, 0xef, 0x3c, 0x68, 0xd9, 0x49, 0xf1, 0x94, 0xe2, + 0x99, 0x9c, 0xe6, 0xe3, 0xfb, 0x22, 0x54, 0x86, 0xf3, 0xd1, 0x2b, 0x2a, 0xed, 0x78, 0xb5, 0x6f, + 0xea, 0x12, 0x50, 0x36, 0x12, 0xa7, 0xa9, 0xa4, 0xa4, 0x38, 0xb8, 0x77, 0x1c, 0xaa, 0x46, 0x36, + 0xfa, 0x00, 0xf2, 0xe9, 0x1a, 0xc5, 0x8c, 0xd0, 0xb7, 0xf6, 0xc2, 0xd5, 0x2d, 0xd8, 0x57, 0x98, + 0xba, 0xdc, 0xa9, 0xe0, 0x2f, 0xe9, 0x48, 0xaa, 0x13, 0x6e, 0xeb, 0x38, 0x55, 0x8b, 0xf4, 0x49, + 0xf0, 0xa3, 0x07, 0x3b, 0x4b, 0xb9, 0xa1, 0x5b, 0x50, 0x9b, 0xea, 0xa7, 0xd3, 0x28, 0x26, 0x66, + 0xba, 0x2c, 0x6b, 0x02, 0xd6, 0xdc, 0x27, 0xaa, 0xb1, 0x77, 0xe6, 0xac, 0x48, 0xdf, 0x5a, 0xa3, + 0xd7, 0x1d, 0x41, 0x39, 0xdc, 0x82, 0x1a, 0x1f, 0x8f, 0x67, 0x31, 0xa3, 0x9a, 0x5e, 0x5a, 0x8f, + 0x6e, 0xcd, 0x8a, 0xec, 0xc3, 0x79, 0x7b, 0x16, 0x9b, 0x78, 0xfe, 0x1a, 0x7c, 0xe5, 0xc1, 0x7b, + 0x2b, 0x92, 0xda, 0xeb, 0x7b, 0x1b, 0x2a, 
0xe6, 0x73, 0x5a, 0xd3, 0xda, 0xb1, 0x5f, 0xec, 0xdb, + 0x25, 0x0f, 0xcb, 0x43, 0x1f, 0x03, 0x08, 0x4a, 0xe6, 0x8c, 0x60, 0x36, 0x3a, 0xb5, 0xab, 0xec, + 0x72, 0x61, 0x95, 0x85, 0xce, 0x38, 0x18, 0x4d, 0x69, 0x42, 0xc3, 0x02, 0x3d, 0xf8, 0xc3, 0x83, + 0xe6, 0xb3, 0xa1, 0x12, 0x73, 0xb9, 0xb4, 0xeb, 0x25, 0xf4, 0xce, 0x2a, 0xe1, 0xa2, 0x03, 0xb6, + 0x96, 0x3a, 0x60, 0xb9, 0x6a, 0xa5, 0x95, 0xaa, 0xa9, 0x2d, 0x99, 0x49, 0x2c, 0x64, 0x84, 0xc7, + 0x92, 0x8a, 0xa8, 0x28, 0x52, 0x29, 0x6c, 0x68, 0xd3, 0x03, 0x65, 0xc9, 0xb7, 0xf8, 0x47, 0x80, + 0x28, 0x23, 0xd1, 0x90, 0x8e, 0xb9, 0xa0, 0x8e, 0x6e, 0x26, 0xeb, 0x3e, 0x65, 0xa4, 0xa7, 0x0d, + 0x39, 0xdb, 0xfd, 0x55, 0x54, 0x0a, 0x7f, 0x15, 0xc1, 0x37, 0x1e, 0xb4, 0x96, 0x4f, 0x6a, 0x15, + 0xbf, 0xbb, 0xb6, 0x2d, 0x37, 0x6b, 0xee, 0x98, 0xff, 0x4b, 0xf5, 0xe3, 0x1f, 0x3c, 0xd8, 0x7f, + 0xf6, 0x86, 0x8a, 0x19, 0x3e, 0xed, 0xe7, 0x5f, 0x42, 0x7d, 0x80, 0xc5, 0x3a, 0x45, 0x57, 0x0a, + 0x39, 0xac, 0x6d, 0xd9, 0xf6, 0xd5, 0x0d, 0x56, 0x7b, 0xa4, 0x27, 0x50, 0x75, 0x0b, 0x17, 0x5d, + 0x2e, 0x70, 0x57, 0x57, 0x73, 0xfb, 0xca, 0xd9, 0x46, 0x13, 0xe7, 0xf8, 0x7b, 0x0f, 0x9a, 0xfa, + 0xc7, 0x66, 0x20, 0xb9, 0xa0, 0x8b, 0x54, 0x7b, 0x50, 0xd6, 0x9b, 0x19, 0x5d, 0x5a, 0x99, 0xaa, + 0x2e, 0xee, 0x3f, 0x8c, 0xdb, 0xe0, 0x1c, 0x7a, 0x0a, 0x55, 0xb7, 0xbc, 0x96, 0x73, 0x5c, 0xd9, + 0x73, 0xcb, 0x39, 0xae, 0xee, 0xbb, 0xe0, 0xdc, 0xf1, 0xd7, 0x1e, 0xb4, 0x0a, 0x3f, 0x35, 0x8b, + 0x34, 0x53, 0xb8, 0xb4, 0xe1, 0x57, 0x09, 0xdd, 0x28, 0xc4, 0xfc, 0xfb, 0x9f, 0xd4, 0xf6, 0xcd, + 0x7f, 0x43, 0xb5, 0x82, 0xfd, 0xe4, 0xc1, 0x9e, 0x69, 0x95, 0x45, 0x16, 0xcf, 0xa1, 0x5e, 0xec, + 0x3b, 0x54, 0x94, 0xe6, 0x8c, 0xab, 0xd7, 0xee, 0x6c, 0xb4, 0x3b, 0xed, 0x5e, 0xac, 0x0e, 0xbd, + 0xce, 0xc6, 0x8e, 0xb5, 0x41, 0x0f, 0x36, 0x13, 0xf2, 0xa8, 0xbd, 0xd6, 0xe7, 0x28, 0x93, 0x5c, + 0xbc, 0xec, 0xc6, 0xfc, 0x68, 0xc4, 0x93, 0x84, 0xb3, 0xa3, 0x74, 0x38, 0xac, 0xe8, 0xa5, 0x74, + 0xe7, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0xd5, 0x0c, 0xd9, 0x34, 0x0c, 
0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCOverlayInspectorClient interface { + DRPCConn() drpc.Conn + + // CountNodes returns the number of nodes in the cache + CountNodes(ctx context.Context, in *CountNodesRequest) (*CountNodesResponse, error) + // DumpNodes returns all the nodes in the cache + DumpNodes(ctx context.Context, in *DumpNodesRequest) (*DumpNodesResponse, error) +} + +type drpcOverlayInspectorClient struct { + cc drpc.Conn +} + +func NewDRPCOverlayInspectorClient(cc drpc.Conn) DRPCOverlayInspectorClient { + return &drpcOverlayInspectorClient{cc} +} + +func (c *drpcOverlayInspectorClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcOverlayInspectorClient) CountNodes(ctx context.Context, in *CountNodesRequest) (*CountNodesResponse, error) { + out := new(CountNodesResponse) + err := c.cc.Invoke(ctx, "/inspector.OverlayInspector/CountNodes", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcOverlayInspectorClient) DumpNodes(ctx context.Context, in *DumpNodesRequest) (*DumpNodesResponse, error) { + out := new(DumpNodesResponse) + err := c.cc.Invoke(ctx, "/inspector.OverlayInspector/DumpNodes", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCOverlayInspectorServer interface { + // CountNodes returns the number of nodes in the cache + CountNodes(context.Context, *CountNodesRequest) (*CountNodesResponse, error) + // DumpNodes returns all the nodes in the cache + DumpNodes(context.Context, *DumpNodesRequest) (*DumpNodesResponse, error) +} + +type DRPCOverlayInspectorDescription struct{} + +func (DRPCOverlayInspectorDescription) NumMethods() int { return 2 } + +func (DRPCOverlayInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/inspector.OverlayInspector/CountNodes", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCOverlayInspectorServer). 
+ CountNodes( + ctx, + in1.(*CountNodesRequest), + ) + }, DRPCOverlayInspectorServer.CountNodes, true + case 1: + return "/inspector.OverlayInspector/DumpNodes", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCOverlayInspectorServer). + DumpNodes( + ctx, + in1.(*DumpNodesRequest), + ) + }, DRPCOverlayInspectorServer.DumpNodes, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterOverlayInspector(mux drpc.Mux, impl DRPCOverlayInspectorServer) error { + return mux.Register(impl, DRPCOverlayInspectorDescription{}) +} + +type DRPCOverlayInspector_CountNodesStream interface { + drpc.Stream + SendAndClose(*CountNodesResponse) error +} + +type drpcOverlayInspectorCountNodesStream struct { + drpc.Stream +} + +func (x *drpcOverlayInspectorCountNodesStream) SendAndClose(m *CountNodesResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCOverlayInspector_DumpNodesStream interface { + drpc.Stream + SendAndClose(*DumpNodesResponse) error +} + +type drpcOverlayInspectorDumpNodesStream struct { + drpc.Stream +} + +func (x *drpcOverlayInspectorDumpNodesStream) SendAndClose(m *DumpNodesResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPieceStoreInspectorClient interface { + DRPCConn() drpc.Conn + + // Stats return space and bandwidth stats for a storagenode + Stats(ctx context.Context, in *StatsRequest) (*StatSummaryResponse, error) + // Dashboard returns stats for a specific storagenode + Dashboard(ctx context.Context, in *DashboardRequest) (*DashboardResponse, error) +} + +type drpcPieceStoreInspectorClient struct { + cc drpc.Conn +} + +func NewDRPCPieceStoreInspectorClient(cc drpc.Conn) DRPCPieceStoreInspectorClient { + return &drpcPieceStoreInspectorClient{cc} +} + +func (c *drpcPieceStoreInspectorClient) DRPCConn() drpc.Conn { return c.cc } + +func (c 
*drpcPieceStoreInspectorClient) Stats(ctx context.Context, in *StatsRequest) (*StatSummaryResponse, error) { + out := new(StatSummaryResponse) + err := c.cc.Invoke(ctx, "/inspector.PieceStoreInspector/Stats", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPieceStoreInspectorClient) Dashboard(ctx context.Context, in *DashboardRequest) (*DashboardResponse, error) { + out := new(DashboardResponse) + err := c.cc.Invoke(ctx, "/inspector.PieceStoreInspector/Dashboard", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCPieceStoreInspectorServer interface { + // Stats return space and bandwidth stats for a storagenode + Stats(context.Context, *StatsRequest) (*StatSummaryResponse, error) + // Dashboard returns stats for a specific storagenode + Dashboard(context.Context, *DashboardRequest) (*DashboardResponse, error) +} + +type DRPCPieceStoreInspectorDescription struct{} + +func (DRPCPieceStoreInspectorDescription) NumMethods() int { return 2 } + +func (DRPCPieceStoreInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/inspector.PieceStoreInspector/Stats", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPieceStoreInspectorServer). + Stats( + ctx, + in1.(*StatsRequest), + ) + }, DRPCPieceStoreInspectorServer.Stats, true + case 1: + return "/inspector.PieceStoreInspector/Dashboard", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPieceStoreInspectorServer). 
+ Dashboard( + ctx, + in1.(*DashboardRequest), + ) + }, DRPCPieceStoreInspectorServer.Dashboard, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterPieceStoreInspector(mux drpc.Mux, impl DRPCPieceStoreInspectorServer) error { + return mux.Register(impl, DRPCPieceStoreInspectorDescription{}) +} + +type DRPCPieceStoreInspector_StatsStream interface { + drpc.Stream + SendAndClose(*StatSummaryResponse) error +} + +type drpcPieceStoreInspectorStatsStream struct { + drpc.Stream +} + +func (x *drpcPieceStoreInspectorStatsStream) SendAndClose(m *StatSummaryResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPieceStoreInspector_DashboardStream interface { + drpc.Stream + SendAndClose(*DashboardResponse) error +} + +type drpcPieceStoreInspectorDashboardStream struct { + drpc.Stream +} + +func (x *drpcPieceStoreInspectorDashboardStream) SendAndClose(m *DashboardResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCIrreparableInspectorClient interface { + DRPCConn() drpc.Conn + + // ListIrreparableSegments returns damaged segments + ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error) +} + +type drpcIrreparableInspectorClient struct { + cc drpc.Conn +} + +func NewDRPCIrreparableInspectorClient(cc drpc.Conn) DRPCIrreparableInspectorClient { + return &drpcIrreparableInspectorClient{cc} +} + +func (c *drpcIrreparableInspectorClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcIrreparableInspectorClient) ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error) { + out := new(ListIrreparableSegmentsResponse) + err := c.cc.Invoke(ctx, "/inspector.IrreparableInspector/ListIrreparableSegments", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCIrreparableInspectorServer 
interface { + // ListIrreparableSegments returns damaged segments + ListIrreparableSegments(context.Context, *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error) +} + +type DRPCIrreparableInspectorDescription struct{} + +func (DRPCIrreparableInspectorDescription) NumMethods() int { return 1 } + +func (DRPCIrreparableInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/inspector.IrreparableInspector/ListIrreparableSegments", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCIrreparableInspectorServer). + ListIrreparableSegments( + ctx, + in1.(*ListIrreparableSegmentsRequest), + ) + }, DRPCIrreparableInspectorServer.ListIrreparableSegments, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterIrreparableInspector(mux drpc.Mux, impl DRPCIrreparableInspectorServer) error { + return mux.Register(impl, DRPCIrreparableInspectorDescription{}) +} + +type DRPCIrreparableInspector_ListIrreparableSegmentsStream interface { + drpc.Stream + SendAndClose(*ListIrreparableSegmentsResponse) error +} + +type drpcIrreparableInspectorListIrreparableSegmentsStream struct { + drpc.Stream +} + +func (x *drpcIrreparableInspectorListIrreparableSegmentsStream) SendAndClose(m *ListIrreparableSegmentsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCHealthInspectorClient interface { + DRPCConn() drpc.Conn + + // ObjectHealth will return stats about the health of an object + ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error) + // SegmentHealth will return stats about the health of a segment + SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error) +} + +type drpcHealthInspectorClient struct { + cc drpc.Conn +} + +func NewDRPCHealthInspectorClient(cc drpc.Conn) DRPCHealthInspectorClient { + return 
&drpcHealthInspectorClient{cc} +} + +func (c *drpcHealthInspectorClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcHealthInspectorClient) ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error) { + out := new(ObjectHealthResponse) + err := c.cc.Invoke(ctx, "/inspector.HealthInspector/ObjectHealth", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcHealthInspectorClient) SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error) { + out := new(SegmentHealthResponse) + err := c.cc.Invoke(ctx, "/inspector.HealthInspector/SegmentHealth", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCHealthInspectorServer interface { + // ObjectHealth will return stats about the health of an object + ObjectHealth(context.Context, *ObjectHealthRequest) (*ObjectHealthResponse, error) + // SegmentHealth will return stats about the health of a segment + SegmentHealth(context.Context, *SegmentHealthRequest) (*SegmentHealthResponse, error) +} + +type DRPCHealthInspectorDescription struct{} + +func (DRPCHealthInspectorDescription) NumMethods() int { return 2 } + +func (DRPCHealthInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/inspector.HealthInspector/ObjectHealth", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCHealthInspectorServer). + ObjectHealth( + ctx, + in1.(*ObjectHealthRequest), + ) + }, DRPCHealthInspectorServer.ObjectHealth, true + case 1: + return "/inspector.HealthInspector/SegmentHealth", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCHealthInspectorServer). 
+ SegmentHealth( + ctx, + in1.(*SegmentHealthRequest), + ) + }, DRPCHealthInspectorServer.SegmentHealth, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterHealthInspector(mux drpc.Mux, impl DRPCHealthInspectorServer) error { + return mux.Register(impl, DRPCHealthInspectorDescription{}) +} + +type DRPCHealthInspector_ObjectHealthStream interface { + drpc.Stream + SendAndClose(*ObjectHealthResponse) error +} + +type drpcHealthInspectorObjectHealthStream struct { + drpc.Stream +} + +func (x *drpcHealthInspectorObjectHealthStream) SendAndClose(m *ObjectHealthResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCHealthInspector_SegmentHealthStream interface { + drpc.Stream + SendAndClose(*SegmentHealthResponse) error +} + +type drpcHealthInspectorSegmentHealthStream struct { + drpc.Stream +} + +func (x *drpcHealthInspectorSegmentHealthStream) SendAndClose(m *SegmentHealthResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/inspector.proto b/vendor/storj.io/common/pb/inspector.proto new file mode 100644 index 000000000..df5977b3a --- /dev/null +++ b/vendor/storj.io/common/pb/inspector.proto @@ -0,0 +1,132 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +import "gogo.proto"; +import "node.proto"; +import "pointerdb.proto"; +import "google/protobuf/timestamp.proto"; + +package inspector; + +service OverlayInspector { + // CountNodes returns the number of nodes in the cache + rpc CountNodes(CountNodesRequest) returns (CountNodesResponse); + // DumpNodes returns all the nodes in the cache + rpc DumpNodes(DumpNodesRequest) returns (DumpNodesResponse); +} + +service PieceStoreInspector { + // Stats return space and bandwidth stats for a storagenode + rpc Stats(StatsRequest) returns (StatSummaryResponse) {} + // Dashboard returns stats for a specific storagenode + rpc Dashboard(DashboardRequest) returns (DashboardResponse) {} +} + +service IrreparableInspector { + // ListIrreparableSegments returns damaged segments + rpc ListIrreparableSegments(ListIrreparableSegmentsRequest) returns (ListIrreparableSegmentsResponse); +} + +service HealthInspector { + // ObjectHealth will return stats about the health of an object + rpc ObjectHealth(ObjectHealthRequest) returns (ObjectHealthResponse) {} + // SegmentHealth will return stats about the health of a segment + rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {} +} + +// ListSegments +message ListIrreparableSegmentsRequest { + int32 limit = 1; + bytes last_seen_segment_path = 2; +} + +message IrreparableSegment { + bytes path = 1; + pointerdb.Pointer segment_detail = 2; + int32 lost_pieces = 3; + int64 last_repair_attempt = 4; + int64 repair_attempt_count = 5; +} + +message ListIrreparableSegmentsResponse { + repeated IrreparableSegment segments = 1; +} + +// CountNodes +message CountNodesResponse { + int64 count = 1; +} + +message CountNodesRequest { +} + +message DumpNodesRequest {} + +message DumpNodesResponse { + repeated node.Node nodes = 1; +} +message StatsRequest { +} + +message StatSummaryResponse { + int64 used_space = 1; + int64 available_space = 2; + int64 used_ingress = 3; + 
int64 used_egress = 4; + int64 used_bandwidth = 5; + int64 available_bandwidth = 6; +} + +message DashboardRequest { +} + +message DashboardResponse { + bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + int64 node_connections = 2; + string bootstrap_address = 3 [deprecated=true]; + string internal_address = 4; + string external_address = 5; + string dashboard_address = 6; + StatSummaryResponse stats = 7; + string uptime = 8; + google.protobuf.Timestamp last_pinged = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp last_queried = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + bytes last_ping_from_id = 11 [(gogoproto.customtype) = "NodeID"]; + string last_ping_from_address = 12; +} + +message SegmentHealthRequest { + bytes bucket = 1; // segment bucket name + bytes encrypted_path = 2; // segment encrypted path + int64 segment_index = 3; // segment index + bytes project_id = 4; // segment project id +} + +message SegmentHealth { + repeated bytes healthy_ids = 1 [(gogoproto.customtype) = "NodeID"]; // online + not disqualified + repeated bytes unhealthy_ids = 2 [(gogoproto.customtype) = "NodeID"]; // online + disqualified + repeated bytes offline_ids = 3 [(gogoproto.customtype) = "NodeID"]; // offline + bytes segment = 4; // path formatted segment index +} + +message SegmentHealthResponse { + SegmentHealth health = 1; // Information about a segment's health + pointerdb.RedundancyScheme redundancy = 2; // expected segment info +} + +message ObjectHealthRequest { + bytes encrypted_path = 1; // object encrypted path + bytes bucket = 2; // object bucket name + bytes project_id = 3; // object project id + int64 start_after_segment = 4; // Get all segments after specified segment index + int64 end_before_segment = 5; // Stop at segment before specified segment index + int32 limit = 6; // Max number of segments that are checked +} + +message ObjectHealthResponse { + repeated 
SegmentHealth segments = 1; // actual segment info + pointerdb.RedundancyScheme redundancy = 2; // expected segment info +} \ No newline at end of file diff --git a/vendor/storj.io/common/pb/meta.pb.go b/vendor/storj.io/common/pb/meta.pb.go new file mode 100644 index 000000000..143cd07e4 --- /dev/null +++ b/vendor/storj.io/common/pb/meta.pb.go @@ -0,0 +1,93 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: meta.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// SerializableMeta is the object metadata that will be stored serialized +type SerializableMeta struct { + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + UserDefined map[string]string `protobuf:"bytes,2,rep,name=user_defined,json=userDefined,proto3" json:"user_defined,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SerializableMeta) Reset() { *m = SerializableMeta{} } +func (m *SerializableMeta) String() string { return proto.CompactTextString(m) } +func (*SerializableMeta) ProtoMessage() {} +func (*SerializableMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_3b5ea8fe65782bcc, []int{0} +} +func (m *SerializableMeta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SerializableMeta.Unmarshal(m, b) +} +func (m 
*SerializableMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SerializableMeta.Marshal(b, m, deterministic) +} +func (m *SerializableMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializableMeta.Merge(m, src) +} +func (m *SerializableMeta) XXX_Size() int { + return xxx_messageInfo_SerializableMeta.Size(m) +} +func (m *SerializableMeta) XXX_DiscardUnknown() { + xxx_messageInfo_SerializableMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializableMeta proto.InternalMessageInfo + +func (m *SerializableMeta) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *SerializableMeta) GetUserDefined() map[string]string { + if m != nil { + return m.UserDefined + } + return nil +} + +func init() { + proto.RegisterType((*SerializableMeta)(nil), "objects.SerializableMeta") + proto.RegisterMapType((map[string]string)(nil), "objects.SerializableMeta.UserDefinedEntry") +} + +func init() { proto.RegisterFile("meta.proto", fileDescriptor_3b5ea8fe65782bcc) } + +var fileDescriptor_3b5ea8fe65782bcc = []byte{ + // 207 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xca, 0x4d, 0x2d, 0x49, + 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xcf, 0x4f, 0xca, 0x4a, 0x4d, 0x2e, 0x29, 0x56, + 0x3a, 0xc2, 0xc8, 0x25, 0x10, 0x9c, 0x5a, 0x94, 0x99, 0x98, 0x93, 0x59, 0x95, 0x98, 0x94, 0x93, + 0xea, 0x9b, 0x5a, 0x92, 0x28, 0xa4, 0xc8, 0xc5, 0x93, 0x9c, 0x9f, 0x57, 0x92, 0x9a, 0x57, 0x12, + 0x5f, 0x52, 0x59, 0x90, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0xc4, 0x0d, 0x15, 0x0b, 0xa9, + 0x2c, 0x48, 0x15, 0xf2, 0xe5, 0xe2, 0x29, 0x2d, 0x4e, 0x2d, 0x8a, 0x4f, 0x49, 0x4d, 0xcb, 0xcc, + 0x4b, 0x4d, 0x91, 0x60, 0x52, 0x60, 0xd6, 0xe0, 0x36, 0xd2, 0xd2, 0x83, 0x9a, 0xab, 0x87, 0x6e, + 0xa6, 0x5e, 0x68, 0x71, 0x6a, 0x91, 0x0b, 0x44, 0xb1, 0x6b, 0x5e, 0x49, 0x51, 0x65, 0x10, 0x77, + 0x29, 0x42, 0x44, 0xca, 0x8e, 0x4b, 0x00, 0x5d, 0x81, 0x90, 0x00, 
0x17, 0x73, 0x76, 0x6a, 0x25, + 0xd4, 0x72, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, + 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, 0x89, 0x44, 0x09, 0x15, 0x97, 0xe4, 0x17, 0x65, 0xe9, + 0x65, 0xe6, 0xeb, 0x27, 0xe7, 0xe7, 0xe6, 0xe6, 0xe7, 0xe9, 0x17, 0x24, 0x25, 0xb1, 0x81, 0x3d, + 0x6b, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xec, 0xbb, 0x6b, 0x9d, 0xfa, 0x00, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/meta.proto b/vendor/storj.io/common/pb/meta.proto new file mode 100644 index 000000000..4737339d3 --- /dev/null +++ b/vendor/storj.io/common/pb/meta.proto @@ -0,0 +1,13 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package objects; + +// SerializableMeta is the object metadata that will be stored serialized +message SerializableMeta { + string content_type = 1; + map user_defined = 2; +} \ No newline at end of file diff --git a/vendor/storj.io/common/pb/metainfo.pb.go b/vendor/storj.io/common/pb/metainfo.pb.go new file mode 100644 index 000000000..6544f6166 --- /dev/null +++ b/vendor/storj.io/common/pb/metainfo.pb.go @@ -0,0 +1,6409 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: metainfo.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Object_Status int32 + +const ( + Object_INVALID Object_Status = 0 + Object_UPLOADING Object_Status = 1 + Object_COMMITTING Object_Status = 2 + Object_COMMITTED Object_Status = 3 + Object_DELETING Object_Status = 4 +) + +var Object_Status_name = map[int32]string{ + 0: "INVALID", + 1: "UPLOADING", + 2: "COMMITTING", + 3: "COMMITTED", + 4: "DELETING", +} + +var Object_Status_value = map[string]int32{ + "INVALID": 0, + "UPLOADING": 1, + "COMMITTING": 2, + "COMMITTED": 3, + "DELETING": 4, +} + +func (x Object_Status) String() string { + return proto.EnumName(Object_Status_name, int32(x)) +} + +func (Object_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{30, 0} +} + +type RequestHeader struct { + ApiKey []byte `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` + UserAgent []byte `protobuf:"bytes,2,opt,name=user_agent,json=userAgent,proto3" json:"user_agent,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestHeader) Reset() { *m = RequestHeader{} } +func (m *RequestHeader) String() string { return proto.CompactTextString(m) } +func (*RequestHeader) ProtoMessage() {} +func (*RequestHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{0} +} +func (m *RequestHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestHeader.Unmarshal(m, b) +} +func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic) +} +func (m *RequestHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestHeader.Merge(m, src) +} +func (m *RequestHeader) XXX_Size() int { + return xxx_messageInfo_RequestHeader.Size(m) +} +func (m *RequestHeader) XXX_DiscardUnknown() { + 
xxx_messageInfo_RequestHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestHeader proto.InternalMessageInfo + +func (m *RequestHeader) GetApiKey() []byte { + if m != nil { + return m.ApiKey + } + return nil +} + +func (m *RequestHeader) GetUserAgent() []byte { + if m != nil { + return m.UserAgent + } + return nil +} + +type Bucket struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + PathCipher CipherSuite `protobuf:"varint,2,opt,name=path_cipher,json=pathCipher,proto3,enum=encryption.CipherSuite" json:"path_cipher,omitempty"` + CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + DefaultSegmentSize int64 `protobuf:"varint,4,opt,name=default_segment_size,json=defaultSegmentSize,proto3" json:"default_segment_size,omitempty"` + DefaultRedundancyScheme *RedundancyScheme `protobuf:"bytes,5,opt,name=default_redundancy_scheme,json=defaultRedundancyScheme,proto3" json:"default_redundancy_scheme,omitempty"` + DefaultEncryptionParameters *EncryptionParameters `protobuf:"bytes,6,opt,name=default_encryption_parameters,json=defaultEncryptionParameters,proto3" json:"default_encryption_parameters,omitempty"` + PartnerId []byte `protobuf:"bytes,7,opt,name=partner_id,json=partnerId,proto3" json:"partner_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{1} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (m *Bucket) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Bucket.Merge(m, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *Bucket) GetPathCipher() CipherSuite { + if m != nil { + return m.PathCipher + } + return CipherSuite_ENC_UNSPECIFIED +} + +func (m *Bucket) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *Bucket) GetDefaultSegmentSize() int64 { + if m != nil { + return m.DefaultSegmentSize + } + return 0 +} + +func (m *Bucket) GetDefaultRedundancyScheme() *RedundancyScheme { + if m != nil { + return m.DefaultRedundancyScheme + } + return nil +} + +func (m *Bucket) GetDefaultEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.DefaultEncryptionParameters + } + return nil +} + +func (m *Bucket) GetPartnerId() []byte { + if m != nil { + return m.PartnerId + } + return nil +} + +type BucketListItem struct { + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketListItem) Reset() { *m = BucketListItem{} } +func (m *BucketListItem) String() string { return proto.CompactTextString(m) } +func (*BucketListItem) ProtoMessage() {} +func (*BucketListItem) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{2} +} +func (m *BucketListItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketListItem.Unmarshal(m, b) +} +func (m *BucketListItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketListItem.Marshal(b, m, 
deterministic) +} +func (m *BucketListItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketListItem.Merge(m, src) +} +func (m *BucketListItem) XXX_Size() int { + return xxx_messageInfo_BucketListItem.Size(m) +} +func (m *BucketListItem) XXX_DiscardUnknown() { + xxx_messageInfo_BucketListItem.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketListItem proto.InternalMessageInfo + +func (m *BucketListItem) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *BucketListItem) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +type BucketCreateRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + PathCipher CipherSuite `protobuf:"varint,2,opt,name=path_cipher,json=pathCipher,proto3,enum=encryption.CipherSuite" json:"path_cipher,omitempty"` + DefaultSegmentSize int64 `protobuf:"varint,3,opt,name=default_segment_size,json=defaultSegmentSize,proto3" json:"default_segment_size,omitempty"` + DefaultRedundancyScheme *RedundancyScheme `protobuf:"bytes,4,opt,name=default_redundancy_scheme,json=defaultRedundancyScheme,proto3" json:"default_redundancy_scheme,omitempty"` + DefaultEncryptionParameters *EncryptionParameters `protobuf:"bytes,5,opt,name=default_encryption_parameters,json=defaultEncryptionParameters,proto3" json:"default_encryption_parameters,omitempty"` + PartnerId []byte `protobuf:"bytes,6,opt,name=partner_id,json=partnerId,proto3" json:"partner_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketCreateRequest) Reset() { *m = BucketCreateRequest{} } +func (m *BucketCreateRequest) String() string { return proto.CompactTextString(m) } +func (*BucketCreateRequest) ProtoMessage() {} +func (*BucketCreateRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_631e2f30a93cd64e, []int{3} +} +func (m *BucketCreateRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketCreateRequest.Unmarshal(m, b) +} +func (m *BucketCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketCreateRequest.Marshal(b, m, deterministic) +} +func (m *BucketCreateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketCreateRequest.Merge(m, src) +} +func (m *BucketCreateRequest) XXX_Size() int { + return xxx_messageInfo_BucketCreateRequest.Size(m) +} +func (m *BucketCreateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BucketCreateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketCreateRequest proto.InternalMessageInfo + +func (m *BucketCreateRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BucketCreateRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *BucketCreateRequest) GetPathCipher() CipherSuite { + if m != nil { + return m.PathCipher + } + return CipherSuite_ENC_UNSPECIFIED +} + +func (m *BucketCreateRequest) GetDefaultSegmentSize() int64 { + if m != nil { + return m.DefaultSegmentSize + } + return 0 +} + +func (m *BucketCreateRequest) GetDefaultRedundancyScheme() *RedundancyScheme { + if m != nil { + return m.DefaultRedundancyScheme + } + return nil +} + +func (m *BucketCreateRequest) GetDefaultEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.DefaultEncryptionParameters + } + return nil +} + +func (m *BucketCreateRequest) GetPartnerId() []byte { + if m != nil { + return m.PartnerId + } + return nil +} + +type BucketCreateResponse struct { + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketCreateResponse) Reset() { *m = BucketCreateResponse{} } +func (m 
*BucketCreateResponse) String() string { return proto.CompactTextString(m) } +func (*BucketCreateResponse) ProtoMessage() {} +func (*BucketCreateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{4} +} +func (m *BucketCreateResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketCreateResponse.Unmarshal(m, b) +} +func (m *BucketCreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketCreateResponse.Marshal(b, m, deterministic) +} +func (m *BucketCreateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketCreateResponse.Merge(m, src) +} +func (m *BucketCreateResponse) XXX_Size() int { + return xxx_messageInfo_BucketCreateResponse.Size(m) +} +func (m *BucketCreateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BucketCreateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketCreateResponse proto.InternalMessageInfo + +func (m *BucketCreateResponse) GetBucket() *Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type BucketGetRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketGetRequest) Reset() { *m = BucketGetRequest{} } +func (m *BucketGetRequest) String() string { return proto.CompactTextString(m) } +func (*BucketGetRequest) ProtoMessage() {} +func (*BucketGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{5} +} +func (m *BucketGetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketGetRequest.Unmarshal(m, b) +} +func (m *BucketGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketGetRequest.Marshal(b, m, deterministic) +} +func (m *BucketGetRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketGetRequest.Merge(m, src) +} +func (m *BucketGetRequest) XXX_Size() int { + return xxx_messageInfo_BucketGetRequest.Size(m) +} +func (m *BucketGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BucketGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketGetRequest proto.InternalMessageInfo + +func (m *BucketGetRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BucketGetRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type BucketGetResponse struct { + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketGetResponse) Reset() { *m = BucketGetResponse{} } +func (m *BucketGetResponse) String() string { return proto.CompactTextString(m) } +func (*BucketGetResponse) ProtoMessage() {} +func (*BucketGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{6} +} +func (m *BucketGetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketGetResponse.Unmarshal(m, b) +} +func (m *BucketGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketGetResponse.Marshal(b, m, deterministic) +} +func (m *BucketGetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketGetResponse.Merge(m, src) +} +func (m *BucketGetResponse) XXX_Size() int { + return xxx_messageInfo_BucketGetResponse.Size(m) +} +func (m *BucketGetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BucketGetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketGetResponse proto.InternalMessageInfo + +func (m *BucketGetResponse) GetBucket() *Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type BucketDeleteRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" 
json:"header,omitempty"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketDeleteRequest) Reset() { *m = BucketDeleteRequest{} } +func (m *BucketDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*BucketDeleteRequest) ProtoMessage() {} +func (*BucketDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{7} +} +func (m *BucketDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketDeleteRequest.Unmarshal(m, b) +} +func (m *BucketDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketDeleteRequest.Marshal(b, m, deterministic) +} +func (m *BucketDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketDeleteRequest.Merge(m, src) +} +func (m *BucketDeleteRequest) XXX_Size() int { + return xxx_messageInfo_BucketDeleteRequest.Size(m) +} +func (m *BucketDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BucketDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketDeleteRequest proto.InternalMessageInfo + +func (m *BucketDeleteRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BucketDeleteRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type BucketDeleteResponse struct { + Bucket *Bucket `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketDeleteResponse) Reset() { *m = BucketDeleteResponse{} } +func (m *BucketDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*BucketDeleteResponse) ProtoMessage() {} +func (*BucketDeleteResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_631e2f30a93cd64e, []int{8} +} +func (m *BucketDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketDeleteResponse.Unmarshal(m, b) +} +func (m *BucketDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketDeleteResponse.Marshal(b, m, deterministic) +} +func (m *BucketDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketDeleteResponse.Merge(m, src) +} +func (m *BucketDeleteResponse) XXX_Size() int { + return xxx_messageInfo_BucketDeleteResponse.Size(m) +} +func (m *BucketDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BucketDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketDeleteResponse proto.InternalMessageInfo + +func (m *BucketDeleteResponse) GetBucket() *Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type BucketListRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Cursor []byte `protobuf:"bytes,1,opt,name=cursor,proto3" json:"cursor,omitempty"` + Limit int32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + Direction int32 `protobuf:"varint,3,opt,name=direction,proto3" json:"direction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketListRequest) Reset() { *m = BucketListRequest{} } +func (m *BucketListRequest) String() string { return proto.CompactTextString(m) } +func (*BucketListRequest) ProtoMessage() {} +func (*BucketListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{9} +} +func (m *BucketListRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketListRequest.Unmarshal(m, b) +} +func (m *BucketListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketListRequest.Marshal(b, m, deterministic) +} +func (m *BucketListRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketListRequest.Merge(m, src) +} +func (m *BucketListRequest) XXX_Size() int { + return xxx_messageInfo_BucketListRequest.Size(m) +} +func (m *BucketListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BucketListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketListRequest proto.InternalMessageInfo + +func (m *BucketListRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BucketListRequest) GetCursor() []byte { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *BucketListRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *BucketListRequest) GetDirection() int32 { + if m != nil { + return m.Direction + } + return 0 +} + +type BucketListResponse struct { + Items []*BucketListItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketListResponse) Reset() { *m = BucketListResponse{} } +func (m *BucketListResponse) String() string { return proto.CompactTextString(m) } +func (*BucketListResponse) ProtoMessage() {} +func (*BucketListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{10} +} +func (m *BucketListResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketListResponse.Unmarshal(m, b) +} +func (m *BucketListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketListResponse.Marshal(b, m, deterministic) +} +func (m *BucketListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketListResponse.Merge(m, src) +} +func (m *BucketListResponse) XXX_Size() int { + return xxx_messageInfo_BucketListResponse.Size(m) +} +func (m *BucketListResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_BucketListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketListResponse proto.InternalMessageInfo + +func (m *BucketListResponse) GetItems() []*BucketListItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *BucketListResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +type BucketSetAttributionRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + PartnerId []byte `protobuf:"bytes,2,opt,name=partner_id,json=partnerId,proto3" json:"partner_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSetAttributionRequest) Reset() { *m = BucketSetAttributionRequest{} } +func (m *BucketSetAttributionRequest) String() string { return proto.CompactTextString(m) } +func (*BucketSetAttributionRequest) ProtoMessage() {} +func (*BucketSetAttributionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{11} +} +func (m *BucketSetAttributionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSetAttributionRequest.Unmarshal(m, b) +} +func (m *BucketSetAttributionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSetAttributionRequest.Marshal(b, m, deterministic) +} +func (m *BucketSetAttributionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSetAttributionRequest.Merge(m, src) +} +func (m *BucketSetAttributionRequest) XXX_Size() int { + return xxx_messageInfo_BucketSetAttributionRequest.Size(m) +} +func (m *BucketSetAttributionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSetAttributionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSetAttributionRequest proto.InternalMessageInfo + +func (m *BucketSetAttributionRequest) GetHeader() *RequestHeader { 
+ if m != nil { + return m.Header + } + return nil +} + +func (m *BucketSetAttributionRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *BucketSetAttributionRequest) GetPartnerId() []byte { + if m != nil { + return m.PartnerId + } + return nil +} + +type BucketSetAttributionResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketSetAttributionResponse) Reset() { *m = BucketSetAttributionResponse{} } +func (m *BucketSetAttributionResponse) String() string { return proto.CompactTextString(m) } +func (*BucketSetAttributionResponse) ProtoMessage() {} +func (*BucketSetAttributionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{12} +} +func (m *BucketSetAttributionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketSetAttributionResponse.Unmarshal(m, b) +} +func (m *BucketSetAttributionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketSetAttributionResponse.Marshal(b, m, deterministic) +} +func (m *BucketSetAttributionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketSetAttributionResponse.Merge(m, src) +} +func (m *BucketSetAttributionResponse) XXX_Size() int { + return xxx_messageInfo_BucketSetAttributionResponse.Size(m) +} +func (m *BucketSetAttributionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BucketSetAttributionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketSetAttributionResponse proto.InternalMessageInfo + +type AddressedOrderLimit struct { + Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + StorageNodeAddress *NodeAddress `protobuf:"bytes,2,opt,name=storage_node_address,json=storageNodeAddress,proto3" json:"storage_node_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *AddressedOrderLimit) Reset() { *m = AddressedOrderLimit{} } +func (m *AddressedOrderLimit) String() string { return proto.CompactTextString(m) } +func (*AddressedOrderLimit) ProtoMessage() {} +func (*AddressedOrderLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{13} +} +func (m *AddressedOrderLimit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddressedOrderLimit.Unmarshal(m, b) +} +func (m *AddressedOrderLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddressedOrderLimit.Marshal(b, m, deterministic) +} +func (m *AddressedOrderLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddressedOrderLimit.Merge(m, src) +} +func (m *AddressedOrderLimit) XXX_Size() int { + return xxx_messageInfo_AddressedOrderLimit.Size(m) +} +func (m *AddressedOrderLimit) XXX_DiscardUnknown() { + xxx_messageInfo_AddressedOrderLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_AddressedOrderLimit proto.InternalMessageInfo + +func (m *AddressedOrderLimit) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +func (m *AddressedOrderLimit) GetStorageNodeAddress() *NodeAddress { + if m != nil { + return m.StorageNodeAddress + } + return nil +} + +type SegmentWriteRequestOld struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path []byte `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Segment int64 `protobuf:"varint,3,opt,name=segment,proto3" json:"segment,omitempty"` + Redundancy *RedundancyScheme `protobuf:"bytes,4,opt,name=redundancy,proto3" json:"redundancy,omitempty"` + MaxEncryptedSegmentSize int64 `protobuf:"varint,5,opt,name=max_encrypted_segment_size,json=maxEncryptedSegmentSize,proto3" json:"max_encrypted_segment_size,omitempty"` + Expiration time.Time 
`protobuf:"bytes,6,opt,name=expiration,proto3,stdtime" json:"expiration"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentWriteRequestOld) Reset() { *m = SegmentWriteRequestOld{} } +func (m *SegmentWriteRequestOld) String() string { return proto.CompactTextString(m) } +func (*SegmentWriteRequestOld) ProtoMessage() {} +func (*SegmentWriteRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{14} +} +func (m *SegmentWriteRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentWriteRequestOld.Unmarshal(m, b) +} +func (m *SegmentWriteRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentWriteRequestOld.Marshal(b, m, deterministic) +} +func (m *SegmentWriteRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentWriteRequestOld.Merge(m, src) +} +func (m *SegmentWriteRequestOld) XXX_Size() int { + return xxx_messageInfo_SegmentWriteRequestOld.Size(m) +} +func (m *SegmentWriteRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentWriteRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentWriteRequestOld proto.InternalMessageInfo + +func (m *SegmentWriteRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentWriteRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SegmentWriteRequestOld) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *SegmentWriteRequestOld) GetSegment() int64 { + if m != nil { + return m.Segment + } + return 0 +} + +func (m *SegmentWriteRequestOld) GetRedundancy() *RedundancyScheme { + if m != nil { + return m.Redundancy + } + return nil +} + +func (m *SegmentWriteRequestOld) GetMaxEncryptedSegmentSize() int64 { + if m != nil { + return m.MaxEncryptedSegmentSize + } + return 0 +} + +func (m 
*SegmentWriteRequestOld) GetExpiration() time.Time { + if m != nil { + return m.Expiration + } + return time.Time{} +} + +type SegmentWriteResponseOld struct { + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + RootPieceId PieceID `protobuf:"bytes,2,opt,name=root_piece_id,json=rootPieceId,proto3,customtype=PieceID" json:"root_piece_id"` + PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentWriteResponseOld) Reset() { *m = SegmentWriteResponseOld{} } +func (m *SegmentWriteResponseOld) String() string { return proto.CompactTextString(m) } +func (*SegmentWriteResponseOld) ProtoMessage() {} +func (*SegmentWriteResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{15} +} +func (m *SegmentWriteResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentWriteResponseOld.Unmarshal(m, b) +} +func (m *SegmentWriteResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentWriteResponseOld.Marshal(b, m, deterministic) +} +func (m *SegmentWriteResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentWriteResponseOld.Merge(m, src) +} +func (m *SegmentWriteResponseOld) XXX_Size() int { + return xxx_messageInfo_SegmentWriteResponseOld.Size(m) +} +func (m *SegmentWriteResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentWriteResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentWriteResponseOld proto.InternalMessageInfo + +func (m *SegmentWriteResponseOld) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +type SegmentCommitRequestOld struct { + Header *RequestHeader 
`protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path []byte `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Segment int64 `protobuf:"varint,3,opt,name=segment,proto3" json:"segment,omitempty"` + Pointer *Pointer `protobuf:"bytes,4,opt,name=pointer,proto3" json:"pointer,omitempty"` + OriginalLimits []*OrderLimit `protobuf:"bytes,5,rep,name=original_limits,json=originalLimits,proto3" json:"original_limits,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentCommitRequestOld) Reset() { *m = SegmentCommitRequestOld{} } +func (m *SegmentCommitRequestOld) String() string { return proto.CompactTextString(m) } +func (*SegmentCommitRequestOld) ProtoMessage() {} +func (*SegmentCommitRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{16} +} +func (m *SegmentCommitRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentCommitRequestOld.Unmarshal(m, b) +} +func (m *SegmentCommitRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentCommitRequestOld.Marshal(b, m, deterministic) +} +func (m *SegmentCommitRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentCommitRequestOld.Merge(m, src) +} +func (m *SegmentCommitRequestOld) XXX_Size() int { + return xxx_messageInfo_SegmentCommitRequestOld.Size(m) +} +func (m *SegmentCommitRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentCommitRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentCommitRequestOld proto.InternalMessageInfo + +func (m *SegmentCommitRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentCommitRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m 
*SegmentCommitRequestOld) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *SegmentCommitRequestOld) GetSegment() int64 { + if m != nil { + return m.Segment + } + return 0 +} + +func (m *SegmentCommitRequestOld) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +func (m *SegmentCommitRequestOld) GetOriginalLimits() []*OrderLimit { + if m != nil { + return m.OriginalLimits + } + return nil +} + +type SegmentCommitResponseOld struct { + Pointer *Pointer `protobuf:"bytes,1,opt,name=pointer,proto3" json:"pointer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentCommitResponseOld) Reset() { *m = SegmentCommitResponseOld{} } +func (m *SegmentCommitResponseOld) String() string { return proto.CompactTextString(m) } +func (*SegmentCommitResponseOld) ProtoMessage() {} +func (*SegmentCommitResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{17} +} +func (m *SegmentCommitResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentCommitResponseOld.Unmarshal(m, b) +} +func (m *SegmentCommitResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentCommitResponseOld.Marshal(b, m, deterministic) +} +func (m *SegmentCommitResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentCommitResponseOld.Merge(m, src) +} +func (m *SegmentCommitResponseOld) XXX_Size() int { + return xxx_messageInfo_SegmentCommitResponseOld.Size(m) +} +func (m *SegmentCommitResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentCommitResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentCommitResponseOld proto.InternalMessageInfo + +func (m *SegmentCommitResponseOld) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +type SegmentDownloadRequestOld struct { + Header *RequestHeader 
`protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path []byte `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Segment int64 `protobuf:"varint,3,opt,name=segment,proto3" json:"segment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDownloadRequestOld) Reset() { *m = SegmentDownloadRequestOld{} } +func (m *SegmentDownloadRequestOld) String() string { return proto.CompactTextString(m) } +func (*SegmentDownloadRequestOld) ProtoMessage() {} +func (*SegmentDownloadRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{18} +} +func (m *SegmentDownloadRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDownloadRequestOld.Unmarshal(m, b) +} +func (m *SegmentDownloadRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDownloadRequestOld.Marshal(b, m, deterministic) +} +func (m *SegmentDownloadRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDownloadRequestOld.Merge(m, src) +} +func (m *SegmentDownloadRequestOld) XXX_Size() int { + return xxx_messageInfo_SegmentDownloadRequestOld.Size(m) +} +func (m *SegmentDownloadRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDownloadRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDownloadRequestOld proto.InternalMessageInfo + +func (m *SegmentDownloadRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentDownloadRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SegmentDownloadRequestOld) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *SegmentDownloadRequestOld) GetSegment() int64 { + if m != nil { + return m.Segment + } + 
return 0 +} + +type SegmentDownloadResponseOld struct { + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + Pointer *Pointer `protobuf:"bytes,2,opt,name=pointer,proto3" json:"pointer,omitempty"` + PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDownloadResponseOld) Reset() { *m = SegmentDownloadResponseOld{} } +func (m *SegmentDownloadResponseOld) String() string { return proto.CompactTextString(m) } +func (*SegmentDownloadResponseOld) ProtoMessage() {} +func (*SegmentDownloadResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{19} +} +func (m *SegmentDownloadResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDownloadResponseOld.Unmarshal(m, b) +} +func (m *SegmentDownloadResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDownloadResponseOld.Marshal(b, m, deterministic) +} +func (m *SegmentDownloadResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDownloadResponseOld.Merge(m, src) +} +func (m *SegmentDownloadResponseOld) XXX_Size() int { + return xxx_messageInfo_SegmentDownloadResponseOld.Size(m) +} +func (m *SegmentDownloadResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDownloadResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDownloadResponseOld proto.InternalMessageInfo + +func (m *SegmentDownloadResponseOld) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +func (m *SegmentDownloadResponseOld) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +type SegmentInfoRequestOld struct { + Header 
*RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path []byte `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Segment int64 `protobuf:"varint,3,opt,name=segment,proto3" json:"segment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentInfoRequestOld) Reset() { *m = SegmentInfoRequestOld{} } +func (m *SegmentInfoRequestOld) String() string { return proto.CompactTextString(m) } +func (*SegmentInfoRequestOld) ProtoMessage() {} +func (*SegmentInfoRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{20} +} +func (m *SegmentInfoRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentInfoRequestOld.Unmarshal(m, b) +} +func (m *SegmentInfoRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentInfoRequestOld.Marshal(b, m, deterministic) +} +func (m *SegmentInfoRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentInfoRequestOld.Merge(m, src) +} +func (m *SegmentInfoRequestOld) XXX_Size() int { + return xxx_messageInfo_SegmentInfoRequestOld.Size(m) +} +func (m *SegmentInfoRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentInfoRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentInfoRequestOld proto.InternalMessageInfo + +func (m *SegmentInfoRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentInfoRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SegmentInfoRequestOld) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *SegmentInfoRequestOld) GetSegment() int64 { + if m != nil { + return m.Segment + } + return 0 +} + +type SegmentInfoResponseOld struct { + Pointer *Pointer 
`protobuf:"bytes,2,opt,name=pointer,proto3" json:"pointer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentInfoResponseOld) Reset() { *m = SegmentInfoResponseOld{} } +func (m *SegmentInfoResponseOld) String() string { return proto.CompactTextString(m) } +func (*SegmentInfoResponseOld) ProtoMessage() {} +func (*SegmentInfoResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{21} +} +func (m *SegmentInfoResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentInfoResponseOld.Unmarshal(m, b) +} +func (m *SegmentInfoResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentInfoResponseOld.Marshal(b, m, deterministic) +} +func (m *SegmentInfoResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentInfoResponseOld.Merge(m, src) +} +func (m *SegmentInfoResponseOld) XXX_Size() int { + return xxx_messageInfo_SegmentInfoResponseOld.Size(m) +} +func (m *SegmentInfoResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentInfoResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentInfoResponseOld proto.InternalMessageInfo + +func (m *SegmentInfoResponseOld) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +type SegmentDeleteRequestOld struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Path []byte `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Segment int64 `protobuf:"varint,3,opt,name=segment,proto3" json:"segment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDeleteRequestOld) Reset() { *m = SegmentDeleteRequestOld{} } +func (m *SegmentDeleteRequestOld) String() string { 
return proto.CompactTextString(m) } +func (*SegmentDeleteRequestOld) ProtoMessage() {} +func (*SegmentDeleteRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{22} +} +func (m *SegmentDeleteRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDeleteRequestOld.Unmarshal(m, b) +} +func (m *SegmentDeleteRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDeleteRequestOld.Marshal(b, m, deterministic) +} +func (m *SegmentDeleteRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDeleteRequestOld.Merge(m, src) +} +func (m *SegmentDeleteRequestOld) XXX_Size() int { + return xxx_messageInfo_SegmentDeleteRequestOld.Size(m) +} +func (m *SegmentDeleteRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDeleteRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDeleteRequestOld proto.InternalMessageInfo + +func (m *SegmentDeleteRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentDeleteRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SegmentDeleteRequestOld) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *SegmentDeleteRequestOld) GetSegment() int64 { + if m != nil { + return m.Segment + } + return 0 +} + +type SegmentDeleteResponseOld struct { + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,1,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + PrivateKey PiecePrivateKey `protobuf:"bytes,2,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDeleteResponseOld) Reset() { *m = SegmentDeleteResponseOld{} } +func (m *SegmentDeleteResponseOld) String() string { return 
proto.CompactTextString(m) } +func (*SegmentDeleteResponseOld) ProtoMessage() {} +func (*SegmentDeleteResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{23} +} +func (m *SegmentDeleteResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDeleteResponseOld.Unmarshal(m, b) +} +func (m *SegmentDeleteResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDeleteResponseOld.Marshal(b, m, deterministic) +} +func (m *SegmentDeleteResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDeleteResponseOld.Merge(m, src) +} +func (m *SegmentDeleteResponseOld) XXX_Size() int { + return xxx_messageInfo_SegmentDeleteResponseOld.Size(m) +} +func (m *SegmentDeleteResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDeleteResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDeleteResponseOld proto.InternalMessageInfo + +func (m *SegmentDeleteResponseOld) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +type ListSegmentsRequestOld struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Prefix []byte `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + StartAfter []byte `protobuf:"bytes,3,opt,name=start_after,json=startAfter,proto3" json:"start_after,omitempty"` + EndBefore []byte `protobuf:"bytes,4,opt,name=end_before,json=endBefore,proto3" json:"end_before,omitempty"` // Deprecated: Do not use. 
+ Recursive bool `protobuf:"varint,5,opt,name=recursive,proto3" json:"recursive,omitempty"` + Limit int32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + MetaFlags uint32 `protobuf:"fixed32,7,opt,name=meta_flags,json=metaFlags,proto3" json:"meta_flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSegmentsRequestOld) Reset() { *m = ListSegmentsRequestOld{} } +func (m *ListSegmentsRequestOld) String() string { return proto.CompactTextString(m) } +func (*ListSegmentsRequestOld) ProtoMessage() {} +func (*ListSegmentsRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{24} +} +func (m *ListSegmentsRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSegmentsRequestOld.Unmarshal(m, b) +} +func (m *ListSegmentsRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSegmentsRequestOld.Marshal(b, m, deterministic) +} +func (m *ListSegmentsRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSegmentsRequestOld.Merge(m, src) +} +func (m *ListSegmentsRequestOld) XXX_Size() int { + return xxx_messageInfo_ListSegmentsRequestOld.Size(m) +} +func (m *ListSegmentsRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_ListSegmentsRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSegmentsRequestOld proto.InternalMessageInfo + +func (m *ListSegmentsRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ListSegmentsRequestOld) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ListSegmentsRequestOld) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +func (m *ListSegmentsRequestOld) GetStartAfter() []byte { + if m != nil { + return m.StartAfter + } + return nil +} + +// Deprecated: Do not use. 
+func (m *ListSegmentsRequestOld) GetEndBefore() []byte { + if m != nil { + return m.EndBefore + } + return nil +} + +func (m *ListSegmentsRequestOld) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +func (m *ListSegmentsRequestOld) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ListSegmentsRequestOld) GetMetaFlags() uint32 { + if m != nil { + return m.MetaFlags + } + return 0 +} + +type ListSegmentsResponseOld struct { + Items []*ListSegmentsResponseOld_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSegmentsResponseOld) Reset() { *m = ListSegmentsResponseOld{} } +func (m *ListSegmentsResponseOld) String() string { return proto.CompactTextString(m) } +func (*ListSegmentsResponseOld) ProtoMessage() {} +func (*ListSegmentsResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{25} +} +func (m *ListSegmentsResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSegmentsResponseOld.Unmarshal(m, b) +} +func (m *ListSegmentsResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSegmentsResponseOld.Marshal(b, m, deterministic) +} +func (m *ListSegmentsResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSegmentsResponseOld.Merge(m, src) +} +func (m *ListSegmentsResponseOld) XXX_Size() int { + return xxx_messageInfo_ListSegmentsResponseOld.Size(m) +} +func (m *ListSegmentsResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_ListSegmentsResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSegmentsResponseOld proto.InternalMessageInfo + +func (m *ListSegmentsResponseOld) GetItems() []*ListSegmentsResponseOld_Item { + if m != nil { + return m.Items + } + 
return nil +} + +func (m *ListSegmentsResponseOld) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +type ListSegmentsResponseOld_Item struct { + Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Pointer *Pointer `protobuf:"bytes,2,opt,name=pointer,proto3" json:"pointer,omitempty"` + IsPrefix bool `protobuf:"varint,3,opt,name=is_prefix,json=isPrefix,proto3" json:"is_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSegmentsResponseOld_Item) Reset() { *m = ListSegmentsResponseOld_Item{} } +func (m *ListSegmentsResponseOld_Item) String() string { return proto.CompactTextString(m) } +func (*ListSegmentsResponseOld_Item) ProtoMessage() {} +func (*ListSegmentsResponseOld_Item) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{25, 0} +} +func (m *ListSegmentsResponseOld_Item) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSegmentsResponseOld_Item.Unmarshal(m, b) +} +func (m *ListSegmentsResponseOld_Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSegmentsResponseOld_Item.Marshal(b, m, deterministic) +} +func (m *ListSegmentsResponseOld_Item) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSegmentsResponseOld_Item.Merge(m, src) +} +func (m *ListSegmentsResponseOld_Item) XXX_Size() int { + return xxx_messageInfo_ListSegmentsResponseOld_Item.Size(m) +} +func (m *ListSegmentsResponseOld_Item) XXX_DiscardUnknown() { + xxx_messageInfo_ListSegmentsResponseOld_Item.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSegmentsResponseOld_Item proto.InternalMessageInfo + +func (m *ListSegmentsResponseOld_Item) GetPath() []byte { + if m != nil { + return m.Path + } + return nil +} + +func (m *ListSegmentsResponseOld_Item) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +func (m *ListSegmentsResponseOld_Item) 
GetIsPrefix() bool { + if m != nil { + return m.IsPrefix + } + return false +} + +type SetAttributionRequestOld struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + BucketName []byte `protobuf:"bytes,1,opt,name=bucket_name,json=bucketName,proto3" json:"bucket_name,omitempty"` + PartnerId []byte `protobuf:"bytes,2,opt,name=partner_id,json=partnerId,proto3" json:"partner_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetAttributionRequestOld) Reset() { *m = SetAttributionRequestOld{} } +func (m *SetAttributionRequestOld) String() string { return proto.CompactTextString(m) } +func (*SetAttributionRequestOld) ProtoMessage() {} +func (*SetAttributionRequestOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{26} +} +func (m *SetAttributionRequestOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetAttributionRequestOld.Unmarshal(m, b) +} +func (m *SetAttributionRequestOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetAttributionRequestOld.Marshal(b, m, deterministic) +} +func (m *SetAttributionRequestOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetAttributionRequestOld.Merge(m, src) +} +func (m *SetAttributionRequestOld) XXX_Size() int { + return xxx_messageInfo_SetAttributionRequestOld.Size(m) +} +func (m *SetAttributionRequestOld) XXX_DiscardUnknown() { + xxx_messageInfo_SetAttributionRequestOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SetAttributionRequestOld proto.InternalMessageInfo + +func (m *SetAttributionRequestOld) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SetAttributionRequestOld) GetBucketName() []byte { + if m != nil { + return m.BucketName + } + return nil +} + +func (m *SetAttributionRequestOld) GetPartnerId() []byte { + if m != nil { + return m.PartnerId + } + 
return nil +} + +type SetAttributionResponseOld struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetAttributionResponseOld) Reset() { *m = SetAttributionResponseOld{} } +func (m *SetAttributionResponseOld) String() string { return proto.CompactTextString(m) } +func (*SetAttributionResponseOld) ProtoMessage() {} +func (*SetAttributionResponseOld) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{27} +} +func (m *SetAttributionResponseOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetAttributionResponseOld.Unmarshal(m, b) +} +func (m *SetAttributionResponseOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetAttributionResponseOld.Marshal(b, m, deterministic) +} +func (m *SetAttributionResponseOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetAttributionResponseOld.Merge(m, src) +} +func (m *SetAttributionResponseOld) XXX_Size() int { + return xxx_messageInfo_SetAttributionResponseOld.Size(m) +} +func (m *SetAttributionResponseOld) XXX_DiscardUnknown() { + xxx_messageInfo_SetAttributionResponseOld.DiscardUnknown(m) +} + +var xxx_messageInfo_SetAttributionResponseOld proto.InternalMessageInfo + +type ProjectInfoRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProjectInfoRequest) Reset() { *m = ProjectInfoRequest{} } +func (m *ProjectInfoRequest) String() string { return proto.CompactTextString(m) } +func (*ProjectInfoRequest) ProtoMessage() {} +func (*ProjectInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{28} +} +func (m *ProjectInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProjectInfoRequest.Unmarshal(m, b) +} +func (m *ProjectInfoRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProjectInfoRequest.Marshal(b, m, deterministic) +} +func (m *ProjectInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectInfoRequest.Merge(m, src) +} +func (m *ProjectInfoRequest) XXX_Size() int { + return xxx_messageInfo_ProjectInfoRequest.Size(m) +} +func (m *ProjectInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectInfoRequest proto.InternalMessageInfo + +func (m *ProjectInfoRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type ProjectInfoResponse struct { + ProjectSalt []byte `protobuf:"bytes,1,opt,name=project_salt,json=projectSalt,proto3" json:"project_salt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProjectInfoResponse) Reset() { *m = ProjectInfoResponse{} } +func (m *ProjectInfoResponse) String() string { return proto.CompactTextString(m) } +func (*ProjectInfoResponse) ProtoMessage() {} +func (*ProjectInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{29} +} +func (m *ProjectInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProjectInfoResponse.Unmarshal(m, b) +} +func (m *ProjectInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProjectInfoResponse.Marshal(b, m, deterministic) +} +func (m *ProjectInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectInfoResponse.Merge(m, src) +} +func (m *ProjectInfoResponse) XXX_Size() int { + return xxx_messageInfo_ProjectInfoResponse.Size(m) +} +func (m *ProjectInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectInfoResponse proto.InternalMessageInfo + +func (m *ProjectInfoResponse) GetProjectSalt() []byte { + if 
m != nil { + return m.ProjectSalt + } + return nil +} + +type Object struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + Status Object_Status `protobuf:"varint,4,opt,name=status,proto3,enum=metainfo.Object_Status" json:"status,omitempty"` + StreamId StreamID `protobuf:"bytes,5,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + CreatedAt time.Time `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + StatusAt time.Time `protobuf:"bytes,7,opt,name=status_at,json=statusAt,proto3,stdtime" json:"status_at"` + ExpiresAt time.Time `protobuf:"bytes,8,opt,name=expires_at,json=expiresAt,proto3,stdtime" json:"expires_at"` + EncryptedMetadataNonce Nonce `protobuf:"bytes,9,opt,name=encrypted_metadata_nonce,json=encryptedMetadataNonce,proto3,customtype=Nonce" json:"encrypted_metadata_nonce"` + EncryptedMetadata []byte `protobuf:"bytes,10,opt,name=encrypted_metadata,json=encryptedMetadata,proto3" json:"encrypted_metadata,omitempty"` + FixedSegmentSize int64 `protobuf:"varint,11,opt,name=fixed_segment_size,json=fixedSegmentSize,proto3" json:"fixed_segment_size,omitempty"` + RedundancyScheme *RedundancyScheme `protobuf:"bytes,12,opt,name=redundancy_scheme,json=redundancyScheme,proto3" json:"redundancy_scheme,omitempty"` + EncryptionParameters *EncryptionParameters `protobuf:"bytes,13,opt,name=encryption_parameters,json=encryptionParameters,proto3" json:"encryption_parameters,omitempty"` + TotalSize int64 `protobuf:"varint,14,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + InlineSize int64 `protobuf:"varint,15,opt,name=inline_size,json=inlineSize,proto3" json:"inline_size,omitempty"` + RemoteSize int64 
`protobuf:"varint,16,opt,name=remote_size,json=remoteSize,proto3" json:"remote_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Object) Reset() { *m = Object{} } +func (m *Object) String() string { return proto.CompactTextString(m) } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{30} +} +func (m *Object) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Object.Unmarshal(m, b) +} +func (m *Object) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Object.Marshal(b, m, deterministic) +} +func (m *Object) XXX_Merge(src proto.Message) { + xxx_messageInfo_Object.Merge(m, src) +} +func (m *Object) XXX_Size() int { + return xxx_messageInfo_Object.Size(m) +} +func (m *Object) XXX_DiscardUnknown() { + xxx_messageInfo_Object.DiscardUnknown(m) +} + +var xxx_messageInfo_Object proto.InternalMessageInfo + +func (m *Object) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *Object) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *Object) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Object) GetStatus() Object_Status { + if m != nil { + return m.Status + } + return Object_INVALID +} + +func (m *Object) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *Object) GetStatusAt() time.Time { + if m != nil { + return m.StatusAt + } + return time.Time{} +} + +func (m *Object) GetExpiresAt() time.Time { + if m != nil { + return m.ExpiresAt + } + return time.Time{} +} + +func (m *Object) GetEncryptedMetadata() []byte { + if m != nil { + return m.EncryptedMetadata + } + return nil +} + +func (m *Object) GetFixedSegmentSize() int64 { + if m != nil { + return m.FixedSegmentSize + } + return 0 +} + 
+func (m *Object) GetRedundancyScheme() *RedundancyScheme { + if m != nil { + return m.RedundancyScheme + } + return nil +} + +func (m *Object) GetEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.EncryptionParameters + } + return nil +} + +func (m *Object) GetTotalSize() int64 { + if m != nil { + return m.TotalSize + } + return 0 +} + +func (m *Object) GetInlineSize() int64 { + if m != nil { + return m.InlineSize + } + return 0 +} + +func (m *Object) GetRemoteSize() int64 { + if m != nil { + return m.RemoteSize + } + return 0 +} + +type ObjectBeginRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + ExpiresAt time.Time `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3,stdtime" json:"expires_at"` + RedundancyScheme *RedundancyScheme `protobuf:"bytes,7,opt,name=redundancy_scheme,json=redundancyScheme,proto3" json:"redundancy_scheme,omitempty"` + EncryptionParameters *EncryptionParameters `protobuf:"bytes,8,opt,name=encryption_parameters,json=encryptionParameters,proto3" json:"encryption_parameters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectBeginRequest) Reset() { *m = ObjectBeginRequest{} } +func (m *ObjectBeginRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectBeginRequest) ProtoMessage() {} +func (*ObjectBeginRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{31} +} +func (m *ObjectBeginRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectBeginRequest.Unmarshal(m, b) +} +func (m *ObjectBeginRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectBeginRequest.Marshal(b, m, deterministic) +} +func (m *ObjectBeginRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectBeginRequest.Merge(m, src) +} +func (m *ObjectBeginRequest) XXX_Size() int { + return xxx_messageInfo_ObjectBeginRequest.Size(m) +} +func (m *ObjectBeginRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectBeginRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectBeginRequest proto.InternalMessageInfo + +func (m *ObjectBeginRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ObjectBeginRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectBeginRequest) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectBeginRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ObjectBeginRequest) GetExpiresAt() time.Time { + if m != nil { + return m.ExpiresAt + } + return time.Time{} +} + +func (m *ObjectBeginRequest) GetRedundancyScheme() *RedundancyScheme { + if m != nil { + return m.RedundancyScheme + } + return nil +} + +func (m *ObjectBeginRequest) GetEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.EncryptionParameters + } + return nil +} + +type ObjectBeginResponse struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + StreamId StreamID `protobuf:"bytes,4,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + RedundancyScheme *RedundancyScheme `protobuf:"bytes,5,opt,name=redundancy_scheme,json=redundancyScheme,proto3" json:"redundancy_scheme,omitempty"` + 
EncryptionParameters *EncryptionParameters `protobuf:"bytes,6,opt,name=encryption_parameters,json=encryptionParameters,proto3" json:"encryption_parameters,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectBeginResponse) Reset() { *m = ObjectBeginResponse{} } +func (m *ObjectBeginResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectBeginResponse) ProtoMessage() {} +func (*ObjectBeginResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{32} +} +func (m *ObjectBeginResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectBeginResponse.Unmarshal(m, b) +} +func (m *ObjectBeginResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectBeginResponse.Marshal(b, m, deterministic) +} +func (m *ObjectBeginResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectBeginResponse.Merge(m, src) +} +func (m *ObjectBeginResponse) XXX_Size() int { + return xxx_messageInfo_ObjectBeginResponse.Size(m) +} +func (m *ObjectBeginResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectBeginResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectBeginResponse proto.InternalMessageInfo + +func (m *ObjectBeginResponse) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectBeginResponse) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectBeginResponse) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ObjectBeginResponse) GetRedundancyScheme() *RedundancyScheme { + if m != nil { + return m.RedundancyScheme + } + return nil +} + +func (m *ObjectBeginResponse) GetEncryptionParameters() *EncryptionParameters { + if m != nil { + return m.EncryptionParameters + } + return nil +} + +type ObjectCommitRequest struct { + Header *RequestHeader 
`protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + EncryptedMetadataNonce Nonce `protobuf:"bytes,2,opt,name=encrypted_metadata_nonce,json=encryptedMetadataNonce,proto3,customtype=Nonce" json:"encrypted_metadata_nonce"` + EncryptedMetadata []byte `protobuf:"bytes,3,opt,name=encrypted_metadata,json=encryptedMetadata,proto3" json:"encrypted_metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectCommitRequest) Reset() { *m = ObjectCommitRequest{} } +func (m *ObjectCommitRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectCommitRequest) ProtoMessage() {} +func (*ObjectCommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{33} +} +func (m *ObjectCommitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectCommitRequest.Unmarshal(m, b) +} +func (m *ObjectCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectCommitRequest.Marshal(b, m, deterministic) +} +func (m *ObjectCommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectCommitRequest.Merge(m, src) +} +func (m *ObjectCommitRequest) XXX_Size() int { + return xxx_messageInfo_ObjectCommitRequest.Size(m) +} +func (m *ObjectCommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectCommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectCommitRequest proto.InternalMessageInfo + +func (m *ObjectCommitRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ObjectCommitRequest) GetEncryptedMetadata() []byte { + if m != nil { + return m.EncryptedMetadata + } + return nil +} + +type ObjectCommitResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectCommitResponse) Reset() { *m = ObjectCommitResponse{} } +func (m *ObjectCommitResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectCommitResponse) ProtoMessage() {} +func (*ObjectCommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{34} +} +func (m *ObjectCommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectCommitResponse.Unmarshal(m, b) +} +func (m *ObjectCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectCommitResponse.Marshal(b, m, deterministic) +} +func (m *ObjectCommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectCommitResponse.Merge(m, src) +} +func (m *ObjectCommitResponse) XXX_Size() int { + return xxx_messageInfo_ObjectCommitResponse.Size(m) +} +func (m *ObjectCommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectCommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectCommitResponse proto.InternalMessageInfo + +type ObjectGetRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectGetRequest) Reset() { *m = ObjectGetRequest{} } +func (m *ObjectGetRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectGetRequest) ProtoMessage() {} +func (*ObjectGetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{35} +} +func (m *ObjectGetRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ObjectGetRequest.Unmarshal(m, b) +} +func (m *ObjectGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectGetRequest.Marshal(b, m, deterministic) +} +func (m *ObjectGetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectGetRequest.Merge(m, src) +} +func (m *ObjectGetRequest) XXX_Size() int { + return xxx_messageInfo_ObjectGetRequest.Size(m) +} +func (m *ObjectGetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectGetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectGetRequest proto.InternalMessageInfo + +func (m *ObjectGetRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ObjectGetRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectGetRequest) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectGetRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +type ObjectGetResponse struct { + Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectGetResponse) Reset() { *m = ObjectGetResponse{} } +func (m *ObjectGetResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectGetResponse) ProtoMessage() {} +func (*ObjectGetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{36} +} +func (m *ObjectGetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectGetResponse.Unmarshal(m, b) +} +func (m *ObjectGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectGetResponse.Marshal(b, m, deterministic) +} +func (m *ObjectGetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectGetResponse.Merge(m, src) +} +func (m 
*ObjectGetResponse) XXX_Size() int { + return xxx_messageInfo_ObjectGetResponse.Size(m) +} +func (m *ObjectGetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectGetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectGetResponse proto.InternalMessageInfo + +func (m *ObjectGetResponse) GetObject() *Object { + if m != nil { + return m.Object + } + return nil +} + +type ObjectListRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPrefix []byte `protobuf:"bytes,2,opt,name=encrypted_prefix,json=encryptedPrefix,proto3" json:"encrypted_prefix,omitempty"` + EncryptedCursor []byte `protobuf:"bytes,3,opt,name=encrypted_cursor,json=encryptedCursor,proto3" json:"encrypted_cursor,omitempty"` + Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"` + Limit int32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` + ObjectIncludes *ObjectListItemIncludes `protobuf:"bytes,6,opt,name=object_includes,json=objectIncludes,proto3" json:"object_includes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectListRequest) Reset() { *m = ObjectListRequest{} } +func (m *ObjectListRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectListRequest) ProtoMessage() {} +func (*ObjectListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{37} +} +func (m *ObjectListRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectListRequest.Unmarshal(m, b) +} +func (m *ObjectListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectListRequest.Marshal(b, m, deterministic) +} +func (m *ObjectListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectListRequest.Merge(m, src) +} 
+func (m *ObjectListRequest) XXX_Size() int { + return xxx_messageInfo_ObjectListRequest.Size(m) +} +func (m *ObjectListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectListRequest proto.InternalMessageInfo + +func (m *ObjectListRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ObjectListRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectListRequest) GetEncryptedPrefix() []byte { + if m != nil { + return m.EncryptedPrefix + } + return nil +} + +func (m *ObjectListRequest) GetEncryptedCursor() []byte { + if m != nil { + return m.EncryptedCursor + } + return nil +} + +func (m *ObjectListRequest) GetRecursive() bool { + if m != nil { + return m.Recursive + } + return false +} + +func (m *ObjectListRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ObjectListRequest) GetObjectIncludes() *ObjectListItemIncludes { + if m != nil { + return m.ObjectIncludes + } + return nil +} + +type ObjectListResponse struct { + Items []*ObjectListItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectListResponse) Reset() { *m = ObjectListResponse{} } +func (m *ObjectListResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectListResponse) ProtoMessage() {} +func (*ObjectListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{38} +} +func (m *ObjectListResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectListResponse.Unmarshal(m, b) +} +func (m *ObjectListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ObjectListResponse.Marshal(b, m, deterministic) +} +func (m *ObjectListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectListResponse.Merge(m, src) +} +func (m *ObjectListResponse) XXX_Size() int { + return xxx_messageInfo_ObjectListResponse.Size(m) +} +func (m *ObjectListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectListResponse proto.InternalMessageInfo + +func (m *ObjectListResponse) GetItems() []*ObjectListItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *ObjectListResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +type ObjectListItem struct { + EncryptedPath []byte `protobuf:"bytes,1,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Status Object_Status `protobuf:"varint,3,opt,name=status,proto3,enum=metainfo.Object_Status" json:"status,omitempty"` + CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + StatusAt time.Time `protobuf:"bytes,5,opt,name=status_at,json=statusAt,proto3,stdtime" json:"status_at"` + ExpiresAt time.Time `protobuf:"bytes,6,opt,name=expires_at,json=expiresAt,proto3,stdtime" json:"expires_at"` + EncryptedMetadataNonce Nonce `protobuf:"bytes,7,opt,name=encrypted_metadata_nonce,json=encryptedMetadataNonce,proto3,customtype=Nonce" json:"encrypted_metadata_nonce"` + EncryptedMetadata []byte `protobuf:"bytes,8,opt,name=encrypted_metadata,json=encryptedMetadata,proto3" json:"encrypted_metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectListItem) Reset() { *m = ObjectListItem{} } +func (m *ObjectListItem) String() string { return proto.CompactTextString(m) } +func (*ObjectListItem) ProtoMessage() {} +func 
(*ObjectListItem) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{39} +} +func (m *ObjectListItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectListItem.Unmarshal(m, b) +} +func (m *ObjectListItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectListItem.Marshal(b, m, deterministic) +} +func (m *ObjectListItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectListItem.Merge(m, src) +} +func (m *ObjectListItem) XXX_Size() int { + return xxx_messageInfo_ObjectListItem.Size(m) +} +func (m *ObjectListItem) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectListItem.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectListItem proto.InternalMessageInfo + +func (m *ObjectListItem) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectListItem) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ObjectListItem) GetStatus() Object_Status { + if m != nil { + return m.Status + } + return Object_INVALID +} + +func (m *ObjectListItem) GetCreatedAt() time.Time { + if m != nil { + return m.CreatedAt + } + return time.Time{} +} + +func (m *ObjectListItem) GetStatusAt() time.Time { + if m != nil { + return m.StatusAt + } + return time.Time{} +} + +func (m *ObjectListItem) GetExpiresAt() time.Time { + if m != nil { + return m.ExpiresAt + } + return time.Time{} +} + +func (m *ObjectListItem) GetEncryptedMetadata() []byte { + if m != nil { + return m.EncryptedMetadata + } + return nil +} + +type ObjectListItemIncludes struct { + Metadata bool `protobuf:"varint,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectListItemIncludes) Reset() { *m = ObjectListItemIncludes{} } +func (m *ObjectListItemIncludes) String() string { return proto.CompactTextString(m) } +func 
(*ObjectListItemIncludes) ProtoMessage() {} +func (*ObjectListItemIncludes) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{40} +} +func (m *ObjectListItemIncludes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectListItemIncludes.Unmarshal(m, b) +} +func (m *ObjectListItemIncludes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectListItemIncludes.Marshal(b, m, deterministic) +} +func (m *ObjectListItemIncludes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectListItemIncludes.Merge(m, src) +} +func (m *ObjectListItemIncludes) XXX_Size() int { + return xxx_messageInfo_ObjectListItemIncludes.Size(m) +} +func (m *ObjectListItemIncludes) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectListItemIncludes.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectListItemIncludes proto.InternalMessageInfo + +func (m *ObjectListItemIncludes) GetMetadata() bool { + if m != nil { + return m.Metadata + } + return false +} + +type ObjectBeginDeleteRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectBeginDeleteRequest) Reset() { *m = ObjectBeginDeleteRequest{} } +func (m *ObjectBeginDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectBeginDeleteRequest) ProtoMessage() {} +func (*ObjectBeginDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{41} +} +func (m *ObjectBeginDeleteRequest) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ObjectBeginDeleteRequest.Unmarshal(m, b) +} +func (m *ObjectBeginDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectBeginDeleteRequest.Marshal(b, m, deterministic) +} +func (m *ObjectBeginDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectBeginDeleteRequest.Merge(m, src) +} +func (m *ObjectBeginDeleteRequest) XXX_Size() int { + return xxx_messageInfo_ObjectBeginDeleteRequest.Size(m) +} +func (m *ObjectBeginDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectBeginDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectBeginDeleteRequest proto.InternalMessageInfo + +func (m *ObjectBeginDeleteRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *ObjectBeginDeleteRequest) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *ObjectBeginDeleteRequest) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *ObjectBeginDeleteRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +type ObjectBeginDeleteResponse struct { + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + Object *Object `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectBeginDeleteResponse) Reset() { *m = ObjectBeginDeleteResponse{} } +func (m *ObjectBeginDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectBeginDeleteResponse) ProtoMessage() {} +func (*ObjectBeginDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{42} +} +func (m *ObjectBeginDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectBeginDeleteResponse.Unmarshal(m, b) +} 
+func (m *ObjectBeginDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectBeginDeleteResponse.Marshal(b, m, deterministic) +} +func (m *ObjectBeginDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectBeginDeleteResponse.Merge(m, src) +} +func (m *ObjectBeginDeleteResponse) XXX_Size() int { + return xxx_messageInfo_ObjectBeginDeleteResponse.Size(m) +} +func (m *ObjectBeginDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectBeginDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectBeginDeleteResponse proto.InternalMessageInfo + +func (m *ObjectBeginDeleteResponse) GetObject() *Object { + if m != nil { + return m.Object + } + return nil +} + +type ObjectFinishDeleteRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectFinishDeleteRequest) Reset() { *m = ObjectFinishDeleteRequest{} } +func (m *ObjectFinishDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*ObjectFinishDeleteRequest) ProtoMessage() {} +func (*ObjectFinishDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{43} +} +func (m *ObjectFinishDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectFinishDeleteRequest.Unmarshal(m, b) +} +func (m *ObjectFinishDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectFinishDeleteRequest.Marshal(b, m, deterministic) +} +func (m *ObjectFinishDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFinishDeleteRequest.Merge(m, src) +} +func (m *ObjectFinishDeleteRequest) XXX_Size() int { + return 
xxx_messageInfo_ObjectFinishDeleteRequest.Size(m) +} +func (m *ObjectFinishDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFinishDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectFinishDeleteRequest proto.InternalMessageInfo + +func (m *ObjectFinishDeleteRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +type ObjectFinishDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ObjectFinishDeleteResponse) Reset() { *m = ObjectFinishDeleteResponse{} } +func (m *ObjectFinishDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*ObjectFinishDeleteResponse) ProtoMessage() {} +func (*ObjectFinishDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{44} +} +func (m *ObjectFinishDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ObjectFinishDeleteResponse.Unmarshal(m, b) +} +func (m *ObjectFinishDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ObjectFinishDeleteResponse.Marshal(b, m, deterministic) +} +func (m *ObjectFinishDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFinishDeleteResponse.Merge(m, src) +} +func (m *ObjectFinishDeleteResponse) XXX_Size() int { + return xxx_messageInfo_ObjectFinishDeleteResponse.Size(m) +} +func (m *ObjectFinishDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFinishDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectFinishDeleteResponse proto.InternalMessageInfo + +// only for satellite use +type SatStreamID struct { + Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"` + Version int32 `protobuf:"varint,3,opt,name=version,proto3" 
json:"version,omitempty"` + Redundancy *RedundancyScheme `protobuf:"bytes,4,opt,name=redundancy,proto3" json:"redundancy,omitempty"` + CreationDate time.Time `protobuf:"bytes,5,opt,name=creation_date,json=creationDate,proto3,stdtime" json:"creation_date"` + ExpirationDate time.Time `protobuf:"bytes,6,opt,name=expiration_date,json=expirationDate,proto3,stdtime" json:"expiration_date"` + SatelliteSignature []byte `protobuf:"bytes,9,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SatStreamID) Reset() { *m = SatStreamID{} } +func (m *SatStreamID) String() string { return proto.CompactTextString(m) } +func (*SatStreamID) ProtoMessage() {} +func (*SatStreamID) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{45} +} +func (m *SatStreamID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SatStreamID.Unmarshal(m, b) +} +func (m *SatStreamID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SatStreamID.Marshal(b, m, deterministic) +} +func (m *SatStreamID) XXX_Merge(src proto.Message) { + xxx_messageInfo_SatStreamID.Merge(m, src) +} +func (m *SatStreamID) XXX_Size() int { + return xxx_messageInfo_SatStreamID.Size(m) +} +func (m *SatStreamID) XXX_DiscardUnknown() { + xxx_messageInfo_SatStreamID.DiscardUnknown(m) +} + +var xxx_messageInfo_SatStreamID proto.InternalMessageInfo + +func (m *SatStreamID) GetBucket() []byte { + if m != nil { + return m.Bucket + } + return nil +} + +func (m *SatStreamID) GetEncryptedPath() []byte { + if m != nil { + return m.EncryptedPath + } + return nil +} + +func (m *SatStreamID) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *SatStreamID) GetRedundancy() *RedundancyScheme { + if m != nil { + return m.Redundancy + } + return nil +} + +func (m *SatStreamID) 
GetCreationDate() time.Time { + if m != nil { + return m.CreationDate + } + return time.Time{} +} + +func (m *SatStreamID) GetExpirationDate() time.Time { + if m != nil { + return m.ExpirationDate + } + return time.Time{} +} + +func (m *SatStreamID) GetSatelliteSignature() []byte { + if m != nil { + return m.SatelliteSignature + } + return nil +} + +type Segment struct { + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + Position *SegmentPosition `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"` + EncryptedKeyNonce Nonce `protobuf:"bytes,3,opt,name=encrypted_key_nonce,json=encryptedKeyNonce,proto3,customtype=Nonce" json:"encrypted_key_nonce"` + EncryptedKey []byte `protobuf:"bytes,4,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + SizeEncryptedData int64 `protobuf:"varint,5,opt,name=size_encrypted_data,json=sizeEncryptedData,proto3" json:"size_encrypted_data,omitempty"` + EncryptedInlineData []byte `protobuf:"bytes,6,opt,name=encrypted_inline_data,json=encryptedInlineData,proto3" json:"encrypted_inline_data,omitempty"` + Pieces []*Piece `protobuf:"bytes,7,rep,name=pieces,proto3" json:"pieces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Segment) Reset() { *m = Segment{} } +func (m *Segment) String() string { return proto.CompactTextString(m) } +func (*Segment) ProtoMessage() {} +func (*Segment) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{46} +} +func (m *Segment) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Segment.Unmarshal(m, b) +} +func (m *Segment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Segment.Marshal(b, m, deterministic) +} +func (m *Segment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Segment.Merge(m, src) +} +func (m *Segment) XXX_Size() int { + 
return xxx_messageInfo_Segment.Size(m) +} +func (m *Segment) XXX_DiscardUnknown() { + xxx_messageInfo_Segment.DiscardUnknown(m) +} + +var xxx_messageInfo_Segment proto.InternalMessageInfo + +func (m *Segment) GetPosition() *SegmentPosition { + if m != nil { + return m.Position + } + return nil +} + +func (m *Segment) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *Segment) GetSizeEncryptedData() int64 { + if m != nil { + return m.SizeEncryptedData + } + return 0 +} + +func (m *Segment) GetEncryptedInlineData() []byte { + if m != nil { + return m.EncryptedInlineData + } + return nil +} + +func (m *Segment) GetPieces() []*Piece { + if m != nil { + return m.Pieces + } + return nil +} + +type Piece struct { + PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"` + Node NodeID `protobuf:"bytes,2,opt,name=node,proto3,customtype=NodeID" json:"node"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Piece) Reset() { *m = Piece{} } +func (m *Piece) String() string { return proto.CompactTextString(m) } +func (*Piece) ProtoMessage() {} +func (*Piece) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{47} +} +func (m *Piece) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Piece.Unmarshal(m, b) +} +func (m *Piece) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Piece.Marshal(b, m, deterministic) +} +func (m *Piece) XXX_Merge(src proto.Message) { + xxx_messageInfo_Piece.Merge(m, src) +} +func (m *Piece) XXX_Size() int { + return xxx_messageInfo_Piece.Size(m) +} +func (m *Piece) XXX_DiscardUnknown() { + xxx_messageInfo_Piece.DiscardUnknown(m) +} + +var xxx_messageInfo_Piece proto.InternalMessageInfo + +func (m *Piece) GetPieceNum() int32 { + if m != nil { + return m.PieceNum + } + return 0 +} + +type SegmentPosition struct { + 
PartNumber int32 `protobuf:"varint,1,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"` + Index int32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentPosition) Reset() { *m = SegmentPosition{} } +func (m *SegmentPosition) String() string { return proto.CompactTextString(m) } +func (*SegmentPosition) ProtoMessage() {} +func (*SegmentPosition) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{48} +} +func (m *SegmentPosition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentPosition.Unmarshal(m, b) +} +func (m *SegmentPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentPosition.Marshal(b, m, deterministic) +} +func (m *SegmentPosition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentPosition.Merge(m, src) +} +func (m *SegmentPosition) XXX_Size() int { + return xxx_messageInfo_SegmentPosition.Size(m) +} +func (m *SegmentPosition) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentPosition.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentPosition proto.InternalMessageInfo + +func (m *SegmentPosition) GetPartNumber() int32 { + if m != nil { + return m.PartNumber + } + return 0 +} + +func (m *SegmentPosition) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +type SegmentBeginRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + Position *SegmentPosition `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"` + MaxOrderLimit int64 `protobuf:"varint,3,opt,name=max_order_limit,json=maxOrderLimit,proto3" json:"max_order_limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentBeginRequest) Reset() { *m = SegmentBeginRequest{} } +func (m *SegmentBeginRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentBeginRequest) ProtoMessage() {} +func (*SegmentBeginRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{49} +} +func (m *SegmentBeginRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentBeginRequest.Unmarshal(m, b) +} +func (m *SegmentBeginRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentBeginRequest.Marshal(b, m, deterministic) +} +func (m *SegmentBeginRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentBeginRequest.Merge(m, src) +} +func (m *SegmentBeginRequest) XXX_Size() int { + return xxx_messageInfo_SegmentBeginRequest.Size(m) +} +func (m *SegmentBeginRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentBeginRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentBeginRequest proto.InternalMessageInfo + +func (m *SegmentBeginRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentBeginRequest) GetPosition() *SegmentPosition { + if m != nil { + return m.Position + } + return nil +} + +func (m *SegmentBeginRequest) GetMaxOrderLimit() int64 { + if m != nil { + return m.MaxOrderLimit + } + return 0 +} + +type SegmentBeginResponse struct { + SegmentId SegmentID `protobuf:"bytes,1,opt,name=segment_id,json=segmentId,proto3,customtype=SegmentID" json:"segment_id"` + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,2,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *SegmentBeginResponse) Reset() { *m = SegmentBeginResponse{} } +func (m *SegmentBeginResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentBeginResponse) ProtoMessage() {} +func (*SegmentBeginResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{50} +} +func (m *SegmentBeginResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentBeginResponse.Unmarshal(m, b) +} +func (m *SegmentBeginResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentBeginResponse.Marshal(b, m, deterministic) +} +func (m *SegmentBeginResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentBeginResponse.Merge(m, src) +} +func (m *SegmentBeginResponse) XXX_Size() int { + return xxx_messageInfo_SegmentBeginResponse.Size(m) +} +func (m *SegmentBeginResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentBeginResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentBeginResponse proto.InternalMessageInfo + +func (m *SegmentBeginResponse) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +type SegmentCommitRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + SegmentId SegmentID `protobuf:"bytes,1,opt,name=segment_id,json=segmentId,proto3,customtype=SegmentID" json:"segment_id"` + EncryptedKeyNonce Nonce `protobuf:"bytes,2,opt,name=encrypted_key_nonce,json=encryptedKeyNonce,proto3,customtype=Nonce" json:"encrypted_key_nonce"` + EncryptedKey []byte `protobuf:"bytes,3,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + SizeEncryptedData int64 `protobuf:"varint,4,opt,name=size_encrypted_data,json=sizeEncryptedData,proto3" json:"size_encrypted_data,omitempty"` + UploadResult []*SegmentPieceUploadResult `protobuf:"bytes,5,rep,name=upload_result,json=uploadResult,proto3" 
json:"upload_result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentCommitRequest) Reset() { *m = SegmentCommitRequest{} } +func (m *SegmentCommitRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentCommitRequest) ProtoMessage() {} +func (*SegmentCommitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{51} +} +func (m *SegmentCommitRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentCommitRequest.Unmarshal(m, b) +} +func (m *SegmentCommitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentCommitRequest.Marshal(b, m, deterministic) +} +func (m *SegmentCommitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentCommitRequest.Merge(m, src) +} +func (m *SegmentCommitRequest) XXX_Size() int { + return xxx_messageInfo_SegmentCommitRequest.Size(m) +} +func (m *SegmentCommitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentCommitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentCommitRequest proto.InternalMessageInfo + +func (m *SegmentCommitRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentCommitRequest) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *SegmentCommitRequest) GetSizeEncryptedData() int64 { + if m != nil { + return m.SizeEncryptedData + } + return 0 +} + +func (m *SegmentCommitRequest) GetUploadResult() []*SegmentPieceUploadResult { + if m != nil { + return m.UploadResult + } + return nil +} + +type SegmentPieceUploadResult struct { + PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"` + NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + Hash *PieceHash 
`protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentPieceUploadResult) Reset() { *m = SegmentPieceUploadResult{} } +func (m *SegmentPieceUploadResult) String() string { return proto.CompactTextString(m) } +func (*SegmentPieceUploadResult) ProtoMessage() {} +func (*SegmentPieceUploadResult) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{52} +} +func (m *SegmentPieceUploadResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentPieceUploadResult.Unmarshal(m, b) +} +func (m *SegmentPieceUploadResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentPieceUploadResult.Marshal(b, m, deterministic) +} +func (m *SegmentPieceUploadResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentPieceUploadResult.Merge(m, src) +} +func (m *SegmentPieceUploadResult) XXX_Size() int { + return xxx_messageInfo_SegmentPieceUploadResult.Size(m) +} +func (m *SegmentPieceUploadResult) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentPieceUploadResult.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentPieceUploadResult proto.InternalMessageInfo + +func (m *SegmentPieceUploadResult) GetPieceNum() int32 { + if m != nil { + return m.PieceNum + } + return 0 +} + +func (m *SegmentPieceUploadResult) GetHash() *PieceHash { + if m != nil { + return m.Hash + } + return nil +} + +// only for satellite use +type SatSegmentID struct { + StreamId *SatStreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"` + PartNumber int32 `protobuf:"varint,2,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"` + Index int32 `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` + // TODO we have redundancy in SatStreamID, do we need it here? 
+ // pointerdb.RedundancyScheme redundancy = 4; + RootPieceId PieceID `protobuf:"bytes,5,opt,name=root_piece_id,json=rootPieceId,proto3,customtype=PieceID" json:"root_piece_id"` + OriginalOrderLimits []*AddressedOrderLimit `protobuf:"bytes,6,rep,name=original_order_limits,json=originalOrderLimits,proto3" json:"original_order_limits,omitempty"` + CreationDate time.Time `protobuf:"bytes,7,opt,name=creation_date,json=creationDate,proto3,stdtime" json:"creation_date"` + SatelliteSignature []byte `protobuf:"bytes,8,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SatSegmentID) Reset() { *m = SatSegmentID{} } +func (m *SatSegmentID) String() string { return proto.CompactTextString(m) } +func (*SatSegmentID) ProtoMessage() {} +func (*SatSegmentID) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{53} +} +func (m *SatSegmentID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SatSegmentID.Unmarshal(m, b) +} +func (m *SatSegmentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SatSegmentID.Marshal(b, m, deterministic) +} +func (m *SatSegmentID) XXX_Merge(src proto.Message) { + xxx_messageInfo_SatSegmentID.Merge(m, src) +} +func (m *SatSegmentID) XXX_Size() int { + return xxx_messageInfo_SatSegmentID.Size(m) +} +func (m *SatSegmentID) XXX_DiscardUnknown() { + xxx_messageInfo_SatSegmentID.DiscardUnknown(m) +} + +var xxx_messageInfo_SatSegmentID proto.InternalMessageInfo + +func (m *SatSegmentID) GetStreamId() *SatStreamID { + if m != nil { + return m.StreamId + } + return nil +} + +func (m *SatSegmentID) GetPartNumber() int32 { + if m != nil { + return m.PartNumber + } + return 0 +} + +func (m *SatSegmentID) GetIndex() int32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *SatSegmentID) GetOriginalOrderLimits() 
[]*AddressedOrderLimit { + if m != nil { + return m.OriginalOrderLimits + } + return nil +} + +func (m *SatSegmentID) GetCreationDate() time.Time { + if m != nil { + return m.CreationDate + } + return time.Time{} +} + +func (m *SatSegmentID) GetSatelliteSignature() []byte { + if m != nil { + return m.SatelliteSignature + } + return nil +} + +type SegmentCommitResponse struct { + SuccessfulPieces int32 `protobuf:"varint,1,opt,name=successful_pieces,json=successfulPieces,proto3" json:"successful_pieces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentCommitResponse) Reset() { *m = SegmentCommitResponse{} } +func (m *SegmentCommitResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentCommitResponse) ProtoMessage() {} +func (*SegmentCommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{54} +} +func (m *SegmentCommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentCommitResponse.Unmarshal(m, b) +} +func (m *SegmentCommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentCommitResponse.Marshal(b, m, deterministic) +} +func (m *SegmentCommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentCommitResponse.Merge(m, src) +} +func (m *SegmentCommitResponse) XXX_Size() int { + return xxx_messageInfo_SegmentCommitResponse.Size(m) +} +func (m *SegmentCommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentCommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentCommitResponse proto.InternalMessageInfo + +func (m *SegmentCommitResponse) GetSuccessfulPieces() int32 { + if m != nil { + return m.SuccessfulPieces + } + return 0 +} + +type SegmentMakeInlineRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID 
`protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + Position *SegmentPosition `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"` + EncryptedKeyNonce Nonce `protobuf:"bytes,3,opt,name=encrypted_key_nonce,json=encryptedKeyNonce,proto3,customtype=Nonce" json:"encrypted_key_nonce"` + EncryptedKey []byte `protobuf:"bytes,4,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + EncryptedInlineData []byte `protobuf:"bytes,5,opt,name=encrypted_inline_data,json=encryptedInlineData,proto3" json:"encrypted_inline_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentMakeInlineRequest) Reset() { *m = SegmentMakeInlineRequest{} } +func (m *SegmentMakeInlineRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentMakeInlineRequest) ProtoMessage() {} +func (*SegmentMakeInlineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{55} +} +func (m *SegmentMakeInlineRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentMakeInlineRequest.Unmarshal(m, b) +} +func (m *SegmentMakeInlineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentMakeInlineRequest.Marshal(b, m, deterministic) +} +func (m *SegmentMakeInlineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentMakeInlineRequest.Merge(m, src) +} +func (m *SegmentMakeInlineRequest) XXX_Size() int { + return xxx_messageInfo_SegmentMakeInlineRequest.Size(m) +} +func (m *SegmentMakeInlineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentMakeInlineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentMakeInlineRequest proto.InternalMessageInfo + +func (m *SegmentMakeInlineRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentMakeInlineRequest) 
GetPosition() *SegmentPosition { + if m != nil { + return m.Position + } + return nil +} + +func (m *SegmentMakeInlineRequest) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *SegmentMakeInlineRequest) GetEncryptedInlineData() []byte { + if m != nil { + return m.EncryptedInlineData + } + return nil +} + +type SegmentMakeInlineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentMakeInlineResponse) Reset() { *m = SegmentMakeInlineResponse{} } +func (m *SegmentMakeInlineResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentMakeInlineResponse) ProtoMessage() {} +func (*SegmentMakeInlineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{56} +} +func (m *SegmentMakeInlineResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentMakeInlineResponse.Unmarshal(m, b) +} +func (m *SegmentMakeInlineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentMakeInlineResponse.Marshal(b, m, deterministic) +} +func (m *SegmentMakeInlineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentMakeInlineResponse.Merge(m, src) +} +func (m *SegmentMakeInlineResponse) XXX_Size() int { + return xxx_messageInfo_SegmentMakeInlineResponse.Size(m) +} +func (m *SegmentMakeInlineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentMakeInlineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentMakeInlineResponse proto.InternalMessageInfo + +type SegmentBeginDeleteRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + Position *SegmentPosition `protobuf:"bytes,2,opt,name=position,proto3" json:"position,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentBeginDeleteRequest) Reset() { *m = SegmentBeginDeleteRequest{} } +func (m *SegmentBeginDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentBeginDeleteRequest) ProtoMessage() {} +func (*SegmentBeginDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{57} +} +func (m *SegmentBeginDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentBeginDeleteRequest.Unmarshal(m, b) +} +func (m *SegmentBeginDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentBeginDeleteRequest.Marshal(b, m, deterministic) +} +func (m *SegmentBeginDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentBeginDeleteRequest.Merge(m, src) +} +func (m *SegmentBeginDeleteRequest) XXX_Size() int { + return xxx_messageInfo_SegmentBeginDeleteRequest.Size(m) +} +func (m *SegmentBeginDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentBeginDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentBeginDeleteRequest proto.InternalMessageInfo + +func (m *SegmentBeginDeleteRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentBeginDeleteRequest) GetPosition() *SegmentPosition { + if m != nil { + return m.Position + } + return nil +} + +type SegmentBeginDeleteResponse struct { + SegmentId SegmentID `protobuf:"bytes,1,opt,name=segment_id,json=segmentId,proto3,customtype=SegmentID" json:"segment_id"` + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,2,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentBeginDeleteResponse) Reset() { *m = SegmentBeginDeleteResponse{} } +func (m *SegmentBeginDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentBeginDeleteResponse) ProtoMessage() {} +func (*SegmentBeginDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{58} +} +func (m *SegmentBeginDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentBeginDeleteResponse.Unmarshal(m, b) +} +func (m *SegmentBeginDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentBeginDeleteResponse.Marshal(b, m, deterministic) +} +func (m *SegmentBeginDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentBeginDeleteResponse.Merge(m, src) +} +func (m *SegmentBeginDeleteResponse) XXX_Size() int { + return xxx_messageInfo_SegmentBeginDeleteResponse.Size(m) +} +func (m *SegmentBeginDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentBeginDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentBeginDeleteResponse proto.InternalMessageInfo + +func (m *SegmentBeginDeleteResponse) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +type SegmentFinishDeleteRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + SegmentId SegmentID `protobuf:"bytes,1,opt,name=segment_id,json=segmentId,proto3,customtype=SegmentID" json:"segment_id"` + Results []*SegmentPieceDeleteResult `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentFinishDeleteRequest) Reset() { *m = SegmentFinishDeleteRequest{} } +func (m *SegmentFinishDeleteRequest) String() string { return proto.CompactTextString(m) } 
+func (*SegmentFinishDeleteRequest) ProtoMessage() {} +func (*SegmentFinishDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{59} +} +func (m *SegmentFinishDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentFinishDeleteRequest.Unmarshal(m, b) +} +func (m *SegmentFinishDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentFinishDeleteRequest.Marshal(b, m, deterministic) +} +func (m *SegmentFinishDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentFinishDeleteRequest.Merge(m, src) +} +func (m *SegmentFinishDeleteRequest) XXX_Size() int { + return xxx_messageInfo_SegmentFinishDeleteRequest.Size(m) +} +func (m *SegmentFinishDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentFinishDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentFinishDeleteRequest proto.InternalMessageInfo + +func (m *SegmentFinishDeleteRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentFinishDeleteRequest) GetResults() []*SegmentPieceDeleteResult { + if m != nil { + return m.Results + } + return nil +} + +type SegmentPieceDeleteResult struct { + PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"` + NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + Hash *PieceHash `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentPieceDeleteResult) Reset() { *m = SegmentPieceDeleteResult{} } +func (m *SegmentPieceDeleteResult) String() string { return proto.CompactTextString(m) } +func (*SegmentPieceDeleteResult) ProtoMessage() {} +func (*SegmentPieceDeleteResult) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, 
[]int{60} +} +func (m *SegmentPieceDeleteResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentPieceDeleteResult.Unmarshal(m, b) +} +func (m *SegmentPieceDeleteResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentPieceDeleteResult.Marshal(b, m, deterministic) +} +func (m *SegmentPieceDeleteResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentPieceDeleteResult.Merge(m, src) +} +func (m *SegmentPieceDeleteResult) XXX_Size() int { + return xxx_messageInfo_SegmentPieceDeleteResult.Size(m) +} +func (m *SegmentPieceDeleteResult) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentPieceDeleteResult.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentPieceDeleteResult proto.InternalMessageInfo + +func (m *SegmentPieceDeleteResult) GetPieceNum() int32 { + if m != nil { + return m.PieceNum + } + return 0 +} + +func (m *SegmentPieceDeleteResult) GetHash() *PieceHash { + if m != nil { + return m.Hash + } + return nil +} + +type SegmentFinishDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentFinishDeleteResponse) Reset() { *m = SegmentFinishDeleteResponse{} } +func (m *SegmentFinishDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentFinishDeleteResponse) ProtoMessage() {} +func (*SegmentFinishDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{61} +} +func (m *SegmentFinishDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentFinishDeleteResponse.Unmarshal(m, b) +} +func (m *SegmentFinishDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentFinishDeleteResponse.Marshal(b, m, deterministic) +} +func (m *SegmentFinishDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentFinishDeleteResponse.Merge(m, src) +} +func (m 
*SegmentFinishDeleteResponse) XXX_Size() int { + return xxx_messageInfo_SegmentFinishDeleteResponse.Size(m) +} +func (m *SegmentFinishDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentFinishDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentFinishDeleteResponse proto.InternalMessageInfo + +type SegmentListRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + CursorPosition *SegmentPosition `protobuf:"bytes,2,opt,name=cursor_position,json=cursorPosition,proto3" json:"cursor_position,omitempty"` + Limit int32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentListRequest) Reset() { *m = SegmentListRequest{} } +func (m *SegmentListRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentListRequest) ProtoMessage() {} +func (*SegmentListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{62} +} +func (m *SegmentListRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentListRequest.Unmarshal(m, b) +} +func (m *SegmentListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentListRequest.Marshal(b, m, deterministic) +} +func (m *SegmentListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentListRequest.Merge(m, src) +} +func (m *SegmentListRequest) XXX_Size() int { + return xxx_messageInfo_SegmentListRequest.Size(m) +} +func (m *SegmentListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentListRequest proto.InternalMessageInfo + +func (m *SegmentListRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + 
} + return nil +} + +func (m *SegmentListRequest) GetCursorPosition() *SegmentPosition { + if m != nil { + return m.CursorPosition + } + return nil +} + +func (m *SegmentListRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +type SegmentListResponse struct { + Items []*SegmentListItem `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentListResponse) Reset() { *m = SegmentListResponse{} } +func (m *SegmentListResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentListResponse) ProtoMessage() {} +func (*SegmentListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{63} +} +func (m *SegmentListResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentListResponse.Unmarshal(m, b) +} +func (m *SegmentListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentListResponse.Marshal(b, m, deterministic) +} +func (m *SegmentListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentListResponse.Merge(m, src) +} +func (m *SegmentListResponse) XXX_Size() int { + return xxx_messageInfo_SegmentListResponse.Size(m) +} +func (m *SegmentListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentListResponse proto.InternalMessageInfo + +func (m *SegmentListResponse) GetItems() []*SegmentListItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *SegmentListResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +type SegmentListItem struct { + Position *SegmentPosition `protobuf:"bytes,1,opt,name=position,proto3" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentListItem) Reset() { *m = SegmentListItem{} } +func (m *SegmentListItem) String() string { return proto.CompactTextString(m) } +func (*SegmentListItem) ProtoMessage() {} +func (*SegmentListItem) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{64} +} +func (m *SegmentListItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentListItem.Unmarshal(m, b) +} +func (m *SegmentListItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentListItem.Marshal(b, m, deterministic) +} +func (m *SegmentListItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentListItem.Merge(m, src) +} +func (m *SegmentListItem) XXX_Size() int { + return xxx_messageInfo_SegmentListItem.Size(m) +} +func (m *SegmentListItem) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentListItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentListItem proto.InternalMessageInfo + +func (m *SegmentListItem) GetPosition() *SegmentPosition { + if m != nil { + return m.Position + } + return nil +} + +type SegmentDownloadRequest struct { + Header *RequestHeader `protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + StreamId StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3,customtype=StreamID" json:"stream_id"` + CursorPosition *SegmentPosition `protobuf:"bytes,2,opt,name=cursor_position,json=cursorPosition,proto3" json:"cursor_position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDownloadRequest) Reset() { *m = SegmentDownloadRequest{} } +func (m *SegmentDownloadRequest) String() string { return proto.CompactTextString(m) } +func (*SegmentDownloadRequest) ProtoMessage() {} +func (*SegmentDownloadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{65} +} +func (m 
*SegmentDownloadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDownloadRequest.Unmarshal(m, b) +} +func (m *SegmentDownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDownloadRequest.Marshal(b, m, deterministic) +} +func (m *SegmentDownloadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDownloadRequest.Merge(m, src) +} +func (m *SegmentDownloadRequest) XXX_Size() int { + return xxx_messageInfo_SegmentDownloadRequest.Size(m) +} +func (m *SegmentDownloadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDownloadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDownloadRequest proto.InternalMessageInfo + +func (m *SegmentDownloadRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *SegmentDownloadRequest) GetCursorPosition() *SegmentPosition { + if m != nil { + return m.CursorPosition + } + return nil +} + +type SegmentDownloadResponse struct { + SegmentId SegmentID `protobuf:"bytes,1,opt,name=segment_id,json=segmentId,proto3,customtype=SegmentID" json:"segment_id"` + AddressedLimits []*AddressedOrderLimit `protobuf:"bytes,2,rep,name=addressed_limits,json=addressedLimits,proto3" json:"addressed_limits,omitempty"` + PrivateKey PiecePrivateKey `protobuf:"bytes,3,opt,name=private_key,json=privateKey,proto3,customtype=PiecePrivateKey" json:"private_key"` + EncryptedInlineData []byte `protobuf:"bytes,4,opt,name=encrypted_inline_data,json=encryptedInlineData,proto3" json:"encrypted_inline_data,omitempty"` + SegmentSize int64 `protobuf:"varint,5,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"` + EncryptedKeyNonce Nonce `protobuf:"bytes,6,opt,name=encrypted_key_nonce,json=encryptedKeyNonce,proto3,customtype=Nonce" json:"encrypted_key_nonce"` + EncryptedKey []byte `protobuf:"bytes,7,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + Next *SegmentPosition 
`protobuf:"bytes,8,opt,name=next,proto3" json:"next,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentDownloadResponse) Reset() { *m = SegmentDownloadResponse{} } +func (m *SegmentDownloadResponse) String() string { return proto.CompactTextString(m) } +func (*SegmentDownloadResponse) ProtoMessage() {} +func (*SegmentDownloadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{66} +} +func (m *SegmentDownloadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentDownloadResponse.Unmarshal(m, b) +} +func (m *SegmentDownloadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentDownloadResponse.Marshal(b, m, deterministic) +} +func (m *SegmentDownloadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentDownloadResponse.Merge(m, src) +} +func (m *SegmentDownloadResponse) XXX_Size() int { + return xxx_messageInfo_SegmentDownloadResponse.Size(m) +} +func (m *SegmentDownloadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentDownloadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentDownloadResponse proto.InternalMessageInfo + +func (m *SegmentDownloadResponse) GetAddressedLimits() []*AddressedOrderLimit { + if m != nil { + return m.AddressedLimits + } + return nil +} + +func (m *SegmentDownloadResponse) GetEncryptedInlineData() []byte { + if m != nil { + return m.EncryptedInlineData + } + return nil +} + +func (m *SegmentDownloadResponse) GetSegmentSize() int64 { + if m != nil { + return m.SegmentSize + } + return 0 +} + +func (m *SegmentDownloadResponse) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *SegmentDownloadResponse) GetNext() *SegmentPosition { + if m != nil { + return m.Next + } + return nil +} + +type BatchRequest struct { + Header *RequestHeader 
`protobuf:"bytes,15,opt,name=header,proto3" json:"header,omitempty"` + // headers for specific BatchRequestItems are ignored entirely + Requests []*BatchRequestItem `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchRequest) Reset() { *m = BatchRequest{} } +func (m *BatchRequest) String() string { return proto.CompactTextString(m) } +func (*BatchRequest) ProtoMessage() {} +func (*BatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{67} +} +func (m *BatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchRequest.Unmarshal(m, b) +} +func (m *BatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchRequest.Marshal(b, m, deterministic) +} +func (m *BatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchRequest.Merge(m, src) +} +func (m *BatchRequest) XXX_Size() int { + return xxx_messageInfo_BatchRequest.Size(m) +} +func (m *BatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchRequest proto.InternalMessageInfo + +func (m *BatchRequest) GetHeader() *RequestHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BatchRequest) GetRequests() []*BatchRequestItem { + if m != nil { + return m.Requests + } + return nil +} + +type BatchRequestItem struct { + // Types that are valid to be assigned to Request: + // *BatchRequestItem_BucketCreate + // *BatchRequestItem_BucketGet + // *BatchRequestItem_BucketDelete + // *BatchRequestItem_BucketList + // *BatchRequestItem_BucketSetAttribution + // *BatchRequestItem_ObjectBegin + // *BatchRequestItem_ObjectCommit + // *BatchRequestItem_ObjectGet + // *BatchRequestItem_ObjectList + // *BatchRequestItem_ObjectBeginDelete + // *BatchRequestItem_ObjectFinishDelete + // 
*BatchRequestItem_SegmentBegin + // *BatchRequestItem_SegmentCommit + // *BatchRequestItem_SegmentMakeInline + // *BatchRequestItem_SegmentBeginDelete + // *BatchRequestItem_SegmentFinishDelete + // *BatchRequestItem_SegmentList + // *BatchRequestItem_SegmentDownload + Request isBatchRequestItem_Request `protobuf_oneof:"Request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchRequestItem) Reset() { *m = BatchRequestItem{} } +func (m *BatchRequestItem) String() string { return proto.CompactTextString(m) } +func (*BatchRequestItem) ProtoMessage() {} +func (*BatchRequestItem) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{68} +} +func (m *BatchRequestItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchRequestItem.Unmarshal(m, b) +} +func (m *BatchRequestItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchRequestItem.Marshal(b, m, deterministic) +} +func (m *BatchRequestItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchRequestItem.Merge(m, src) +} +func (m *BatchRequestItem) XXX_Size() int { + return xxx_messageInfo_BatchRequestItem.Size(m) +} +func (m *BatchRequestItem) XXX_DiscardUnknown() { + xxx_messageInfo_BatchRequestItem.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchRequestItem proto.InternalMessageInfo + +type isBatchRequestItem_Request interface { + isBatchRequestItem_Request() +} + +type BatchRequestItem_BucketCreate struct { + BucketCreate *BucketCreateRequest `protobuf:"bytes,1,opt,name=bucket_create,json=bucketCreate,proto3,oneof"` +} +type BatchRequestItem_BucketGet struct { + BucketGet *BucketGetRequest `protobuf:"bytes,2,opt,name=bucket_get,json=bucketGet,proto3,oneof"` +} +type BatchRequestItem_BucketDelete struct { + BucketDelete *BucketDeleteRequest `protobuf:"bytes,3,opt,name=bucket_delete,json=bucketDelete,proto3,oneof"` +} +type BatchRequestItem_BucketList 
struct { + BucketList *BucketListRequest `protobuf:"bytes,4,opt,name=bucket_list,json=bucketList,proto3,oneof"` +} +type BatchRequestItem_BucketSetAttribution struct { + BucketSetAttribution *BucketSetAttributionRequest `protobuf:"bytes,5,opt,name=bucket_set_attribution,json=bucketSetAttribution,proto3,oneof"` +} +type BatchRequestItem_ObjectBegin struct { + ObjectBegin *ObjectBeginRequest `protobuf:"bytes,6,opt,name=object_begin,json=objectBegin,proto3,oneof"` +} +type BatchRequestItem_ObjectCommit struct { + ObjectCommit *ObjectCommitRequest `protobuf:"bytes,7,opt,name=object_commit,json=objectCommit,proto3,oneof"` +} +type BatchRequestItem_ObjectGet struct { + ObjectGet *ObjectGetRequest `protobuf:"bytes,8,opt,name=object_get,json=objectGet,proto3,oneof"` +} +type BatchRequestItem_ObjectList struct { + ObjectList *ObjectListRequest `protobuf:"bytes,9,opt,name=object_list,json=objectList,proto3,oneof"` +} +type BatchRequestItem_ObjectBeginDelete struct { + ObjectBeginDelete *ObjectBeginDeleteRequest `protobuf:"bytes,10,opt,name=object_begin_delete,json=objectBeginDelete,proto3,oneof"` +} +type BatchRequestItem_ObjectFinishDelete struct { + ObjectFinishDelete *ObjectFinishDeleteRequest `protobuf:"bytes,11,opt,name=object_finish_delete,json=objectFinishDelete,proto3,oneof"` +} +type BatchRequestItem_SegmentBegin struct { + SegmentBegin *SegmentBeginRequest `protobuf:"bytes,12,opt,name=segment_begin,json=segmentBegin,proto3,oneof"` +} +type BatchRequestItem_SegmentCommit struct { + SegmentCommit *SegmentCommitRequest `protobuf:"bytes,13,opt,name=segment_commit,json=segmentCommit,proto3,oneof"` +} +type BatchRequestItem_SegmentMakeInline struct { + SegmentMakeInline *SegmentMakeInlineRequest `protobuf:"bytes,14,opt,name=segment_make_inline,json=segmentMakeInline,proto3,oneof"` +} +type BatchRequestItem_SegmentBeginDelete struct { + SegmentBeginDelete *SegmentBeginDeleteRequest `protobuf:"bytes,15,opt,name=segment_begin_delete,json=segmentBeginDelete,proto3,oneof"` +} 
+type BatchRequestItem_SegmentFinishDelete struct { + SegmentFinishDelete *SegmentFinishDeleteRequest `protobuf:"bytes,16,opt,name=segment_finish_delete,json=segmentFinishDelete,proto3,oneof"` +} +type BatchRequestItem_SegmentList struct { + SegmentList *SegmentListRequest `protobuf:"bytes,17,opt,name=segment_list,json=segmentList,proto3,oneof"` +} +type BatchRequestItem_SegmentDownload struct { + SegmentDownload *SegmentDownloadRequest `protobuf:"bytes,18,opt,name=segment_download,json=segmentDownload,proto3,oneof"` +} + +func (*BatchRequestItem_BucketCreate) isBatchRequestItem_Request() {} +func (*BatchRequestItem_BucketGet) isBatchRequestItem_Request() {} +func (*BatchRequestItem_BucketDelete) isBatchRequestItem_Request() {} +func (*BatchRequestItem_BucketList) isBatchRequestItem_Request() {} +func (*BatchRequestItem_BucketSetAttribution) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectBegin) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectCommit) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectGet) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectList) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectBeginDelete) isBatchRequestItem_Request() {} +func (*BatchRequestItem_ObjectFinishDelete) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentBegin) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentCommit) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentMakeInline) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentBeginDelete) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentFinishDelete) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentList) isBatchRequestItem_Request() {} +func (*BatchRequestItem_SegmentDownload) isBatchRequestItem_Request() {} + +func (m *BatchRequestItem) GetRequest() isBatchRequestItem_Request { + if m != nil { + return m.Request + } + return nil +} + +func (m *BatchRequestItem) 
GetBucketCreate() *BucketCreateRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_BucketCreate); ok { + return x.BucketCreate + } + return nil +} + +func (m *BatchRequestItem) GetBucketGet() *BucketGetRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_BucketGet); ok { + return x.BucketGet + } + return nil +} + +func (m *BatchRequestItem) GetBucketDelete() *BucketDeleteRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_BucketDelete); ok { + return x.BucketDelete + } + return nil +} + +func (m *BatchRequestItem) GetBucketList() *BucketListRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_BucketList); ok { + return x.BucketList + } + return nil +} + +func (m *BatchRequestItem) GetBucketSetAttribution() *BucketSetAttributionRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_BucketSetAttribution); ok { + return x.BucketSetAttribution + } + return nil +} + +func (m *BatchRequestItem) GetObjectBegin() *ObjectBeginRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectBegin); ok { + return x.ObjectBegin + } + return nil +} + +func (m *BatchRequestItem) GetObjectCommit() *ObjectCommitRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectCommit); ok { + return x.ObjectCommit + } + return nil +} + +func (m *BatchRequestItem) GetObjectGet() *ObjectGetRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectGet); ok { + return x.ObjectGet + } + return nil +} + +func (m *BatchRequestItem) GetObjectList() *ObjectListRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectList); ok { + return x.ObjectList + } + return nil +} + +func (m *BatchRequestItem) GetObjectBeginDelete() *ObjectBeginDeleteRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectBeginDelete); ok { + return x.ObjectBeginDelete + } + return nil +} + +func (m *BatchRequestItem) GetObjectFinishDelete() *ObjectFinishDeleteRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_ObjectFinishDelete); ok { + return x.ObjectFinishDelete + } 
+ return nil +} + +func (m *BatchRequestItem) GetSegmentBegin() *SegmentBeginRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentBegin); ok { + return x.SegmentBegin + } + return nil +} + +func (m *BatchRequestItem) GetSegmentCommit() *SegmentCommitRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentCommit); ok { + return x.SegmentCommit + } + return nil +} + +func (m *BatchRequestItem) GetSegmentMakeInline() *SegmentMakeInlineRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentMakeInline); ok { + return x.SegmentMakeInline + } + return nil +} + +func (m *BatchRequestItem) GetSegmentBeginDelete() *SegmentBeginDeleteRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentBeginDelete); ok { + return x.SegmentBeginDelete + } + return nil +} + +func (m *BatchRequestItem) GetSegmentFinishDelete() *SegmentFinishDeleteRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentFinishDelete); ok { + return x.SegmentFinishDelete + } + return nil +} + +func (m *BatchRequestItem) GetSegmentList() *SegmentListRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentList); ok { + return x.SegmentList + } + return nil +} + +func (m *BatchRequestItem) GetSegmentDownload() *SegmentDownloadRequest { + if x, ok := m.GetRequest().(*BatchRequestItem_SegmentDownload); ok { + return x.SegmentDownload + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*BatchRequestItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BatchRequestItem_OneofMarshaler, _BatchRequestItem_OneofUnmarshaler, _BatchRequestItem_OneofSizer, []interface{}{ + (*BatchRequestItem_BucketCreate)(nil), + (*BatchRequestItem_BucketGet)(nil), + (*BatchRequestItem_BucketDelete)(nil), + (*BatchRequestItem_BucketList)(nil), + (*BatchRequestItem_BucketSetAttribution)(nil), + (*BatchRequestItem_ObjectBegin)(nil), + (*BatchRequestItem_ObjectCommit)(nil), + (*BatchRequestItem_ObjectGet)(nil), + (*BatchRequestItem_ObjectList)(nil), + (*BatchRequestItem_ObjectBeginDelete)(nil), + (*BatchRequestItem_ObjectFinishDelete)(nil), + (*BatchRequestItem_SegmentBegin)(nil), + (*BatchRequestItem_SegmentCommit)(nil), + (*BatchRequestItem_SegmentMakeInline)(nil), + (*BatchRequestItem_SegmentBeginDelete)(nil), + (*BatchRequestItem_SegmentFinishDelete)(nil), + (*BatchRequestItem_SegmentList)(nil), + (*BatchRequestItem_SegmentDownload)(nil), + } +} + +func _BatchRequestItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BatchRequestItem) + // Request + switch x := m.Request.(type) { + case *BatchRequestItem_BucketCreate: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketCreate); err != nil { + return err + } + case *BatchRequestItem_BucketGet: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketGet); err != nil { + return err + } + case *BatchRequestItem_BucketDelete: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketDelete); err != nil { + return err + } + case *BatchRequestItem_BucketList: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketList); err != nil { + return err + } + case *BatchRequestItem_BucketSetAttribution: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err 
:= b.EncodeMessage(x.BucketSetAttribution); err != nil { + return err + } + case *BatchRequestItem_ObjectBegin: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectBegin); err != nil { + return err + } + case *BatchRequestItem_ObjectCommit: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectCommit); err != nil { + return err + } + case *BatchRequestItem_ObjectGet: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectGet); err != nil { + return err + } + case *BatchRequestItem_ObjectList: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectList); err != nil { + return err + } + case *BatchRequestItem_ObjectBeginDelete: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectBeginDelete); err != nil { + return err + } + case *BatchRequestItem_ObjectFinishDelete: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectFinishDelete); err != nil { + return err + } + case *BatchRequestItem_SegmentBegin: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentBegin); err != nil { + return err + } + case *BatchRequestItem_SegmentCommit: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentCommit); err != nil { + return err + } + case *BatchRequestItem_SegmentMakeInline: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentMakeInline); err != nil { + return err + } + case *BatchRequestItem_SegmentBeginDelete: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentBeginDelete); err != nil { + return err + } + case *BatchRequestItem_SegmentFinishDelete: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentFinishDelete); err != nil { + return err + } + case *BatchRequestItem_SegmentList: + _ = b.EncodeVarint(17<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.SegmentList); err != nil { + return err + } + case *BatchRequestItem_SegmentDownload: + _ = b.EncodeVarint(18<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentDownload); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("BatchRequestItem.Request has unexpected type %T", x) + } + return nil +} + +func _BatchRequestItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BatchRequestItem) + switch tag { + case 1: // Request.bucket_create + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketCreateRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_BucketCreate{msg} + return true, err + case 2: // Request.bucket_get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketGetRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_BucketGet{msg} + return true, err + case 3: // Request.bucket_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketDeleteRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_BucketDelete{msg} + return true, err + case 4: // Request.bucket_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketListRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_BucketList{msg} + return true, err + case 5: // Request.bucket_set_attribution + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketSetAttributionRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_BucketSetAttribution{msg} + return true, err + case 6: // Request.object_begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectBeginRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectBegin{msg} + return true, err + case 7: 
// Request.object_commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectCommitRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectCommit{msg} + return true, err + case 8: // Request.object_get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectGetRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectGet{msg} + return true, err + case 9: // Request.object_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectListRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectList{msg} + return true, err + case 10: // Request.object_begin_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectBeginDeleteRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectBeginDelete{msg} + return true, err + case 11: // Request.object_finish_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectFinishDeleteRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_ObjectFinishDelete{msg} + return true, err + case 12: // Request.segment_begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentBeginRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentBegin{msg} + return true, err + case 13: // Request.segment_commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentCommitRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentCommit{msg} + return true, err + case 14: // Request.segment_make_inline + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentMakeInlineRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentMakeInline{msg} + return 
true, err + case 15: // Request.segment_begin_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentBeginDeleteRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentBeginDelete{msg} + return true, err + case 16: // Request.segment_finish_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentFinishDeleteRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentFinishDelete{msg} + return true, err + case 17: // Request.segment_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentListRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentList{msg} + return true, err + case 18: // Request.segment_download + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentDownloadRequest) + err := b.DecodeMessage(msg) + m.Request = &BatchRequestItem_SegmentDownload{msg} + return true, err + default: + return false, nil + } +} + +func _BatchRequestItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BatchRequestItem) + // Request + switch x := m.Request.(type) { + case *BatchRequestItem_BucketCreate: + s := proto.Size(x.BucketCreate) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_BucketGet: + s := proto.Size(x.BucketGet) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_BucketDelete: + s := proto.Size(x.BucketDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_BucketList: + s := proto.Size(x.BucketList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_BucketSetAttribution: + s := proto.Size(x.BucketSetAttribution) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectBegin: + s := 
proto.Size(x.ObjectBegin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectCommit: + s := proto.Size(x.ObjectCommit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectGet: + s := proto.Size(x.ObjectGet) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectList: + s := proto.Size(x.ObjectList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectBeginDelete: + s := proto.Size(x.ObjectBeginDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_ObjectFinishDelete: + s := proto.Size(x.ObjectFinishDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentBegin: + s := proto.Size(x.SegmentBegin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentCommit: + s := proto.Size(x.SegmentCommit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentMakeInline: + s := proto.Size(x.SegmentMakeInline) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentBeginDelete: + s := proto.Size(x.SegmentBeginDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentFinishDelete: + s := proto.Size(x.SegmentFinishDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentList: + s := proto.Size(x.SegmentList) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchRequestItem_SegmentDownload: + s := proto.Size(x.SegmentDownload) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type BatchResponse struct { + Responses 
[]*BatchResponseItem `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchResponse) Reset() { *m = BatchResponse{} } +func (m *BatchResponse) String() string { return proto.CompactTextString(m) } +func (*BatchResponse) ProtoMessage() {} +func (*BatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{69} +} +func (m *BatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchResponse.Unmarshal(m, b) +} +func (m *BatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchResponse.Marshal(b, m, deterministic) +} +func (m *BatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchResponse.Merge(m, src) +} +func (m *BatchResponse) XXX_Size() int { + return xxx_messageInfo_BatchResponse.Size(m) +} +func (m *BatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchResponse proto.InternalMessageInfo + +func (m *BatchResponse) GetResponses() []*BatchResponseItem { + if m != nil { + return m.Responses + } + return nil +} + +type BatchResponseItem struct { + // Types that are valid to be assigned to Response: + // *BatchResponseItem_BucketCreate + // *BatchResponseItem_BucketGet + // *BatchResponseItem_BucketDelete + // *BatchResponseItem_BucketList + // *BatchResponseItem_BucketSetAttribution + // *BatchResponseItem_ObjectBegin + // *BatchResponseItem_ObjectCommit + // *BatchResponseItem_ObjectGet + // *BatchResponseItem_ObjectList + // *BatchResponseItem_ObjectBeginDelete + // *BatchResponseItem_ObjectFinishDelete + // *BatchResponseItem_SegmentBegin + // *BatchResponseItem_SegmentCommit + // *BatchResponseItem_SegmentMakeInline + // *BatchResponseItem_SegmentBeginDelete + // *BatchResponseItem_SegmentFinishDelete + // *BatchResponseItem_SegmentList + 
// *BatchResponseItem_SegmentDownload + Response isBatchResponseItem_Response `protobuf_oneof:"Response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BatchResponseItem) Reset() { *m = BatchResponseItem{} } +func (m *BatchResponseItem) String() string { return proto.CompactTextString(m) } +func (*BatchResponseItem) ProtoMessage() {} +func (*BatchResponseItem) Descriptor() ([]byte, []int) { + return fileDescriptor_631e2f30a93cd64e, []int{70} +} +func (m *BatchResponseItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BatchResponseItem.Unmarshal(m, b) +} +func (m *BatchResponseItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BatchResponseItem.Marshal(b, m, deterministic) +} +func (m *BatchResponseItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_BatchResponseItem.Merge(m, src) +} +func (m *BatchResponseItem) XXX_Size() int { + return xxx_messageInfo_BatchResponseItem.Size(m) +} +func (m *BatchResponseItem) XXX_DiscardUnknown() { + xxx_messageInfo_BatchResponseItem.DiscardUnknown(m) +} + +var xxx_messageInfo_BatchResponseItem proto.InternalMessageInfo + +type isBatchResponseItem_Response interface { + isBatchResponseItem_Response() +} + +type BatchResponseItem_BucketCreate struct { + BucketCreate *BucketCreateResponse `protobuf:"bytes,1,opt,name=bucket_create,json=bucketCreate,proto3,oneof"` +} +type BatchResponseItem_BucketGet struct { + BucketGet *BucketGetResponse `protobuf:"bytes,2,opt,name=bucket_get,json=bucketGet,proto3,oneof"` +} +type BatchResponseItem_BucketDelete struct { + BucketDelete *BucketDeleteResponse `protobuf:"bytes,3,opt,name=bucket_delete,json=bucketDelete,proto3,oneof"` +} +type BatchResponseItem_BucketList struct { + BucketList *BucketListResponse `protobuf:"bytes,4,opt,name=bucket_list,json=bucketList,proto3,oneof"` +} +type BatchResponseItem_BucketSetAttribution struct { + BucketSetAttribution 
*BucketSetAttributionResponse `protobuf:"bytes,5,opt,name=bucket_set_attribution,json=bucketSetAttribution,proto3,oneof"` +} +type BatchResponseItem_ObjectBegin struct { + ObjectBegin *ObjectBeginResponse `protobuf:"bytes,6,opt,name=object_begin,json=objectBegin,proto3,oneof"` +} +type BatchResponseItem_ObjectCommit struct { + ObjectCommit *ObjectCommitResponse `protobuf:"bytes,7,opt,name=object_commit,json=objectCommit,proto3,oneof"` +} +type BatchResponseItem_ObjectGet struct { + ObjectGet *ObjectGetResponse `protobuf:"bytes,8,opt,name=object_get,json=objectGet,proto3,oneof"` +} +type BatchResponseItem_ObjectList struct { + ObjectList *ObjectListResponse `protobuf:"bytes,9,opt,name=object_list,json=objectList,proto3,oneof"` +} +type BatchResponseItem_ObjectBeginDelete struct { + ObjectBeginDelete *ObjectBeginDeleteResponse `protobuf:"bytes,10,opt,name=object_begin_delete,json=objectBeginDelete,proto3,oneof"` +} +type BatchResponseItem_ObjectFinishDelete struct { + ObjectFinishDelete *ObjectFinishDeleteResponse `protobuf:"bytes,11,opt,name=object_finish_delete,json=objectFinishDelete,proto3,oneof"` +} +type BatchResponseItem_SegmentBegin struct { + SegmentBegin *SegmentBeginResponse `protobuf:"bytes,12,opt,name=segment_begin,json=segmentBegin,proto3,oneof"` +} +type BatchResponseItem_SegmentCommit struct { + SegmentCommit *SegmentCommitResponse `protobuf:"bytes,13,opt,name=segment_commit,json=segmentCommit,proto3,oneof"` +} +type BatchResponseItem_SegmentMakeInline struct { + SegmentMakeInline *SegmentMakeInlineResponse `protobuf:"bytes,14,opt,name=segment_make_inline,json=segmentMakeInline,proto3,oneof"` +} +type BatchResponseItem_SegmentBeginDelete struct { + SegmentBeginDelete *SegmentBeginDeleteResponse `protobuf:"bytes,15,opt,name=segment_begin_delete,json=segmentBeginDelete,proto3,oneof"` +} +type BatchResponseItem_SegmentFinishDelete struct { + SegmentFinishDelete *SegmentFinishDeleteResponse 
`protobuf:"bytes,16,opt,name=segment_finish_delete,json=segmentFinishDelete,proto3,oneof"` +} +type BatchResponseItem_SegmentList struct { + SegmentList *SegmentListResponse `protobuf:"bytes,17,opt,name=segment_list,json=segmentList,proto3,oneof"` +} +type BatchResponseItem_SegmentDownload struct { + SegmentDownload *SegmentDownloadResponse `protobuf:"bytes,18,opt,name=segment_download,json=segmentDownload,proto3,oneof"` +} + +func (*BatchResponseItem_BucketCreate) isBatchResponseItem_Response() {} +func (*BatchResponseItem_BucketGet) isBatchResponseItem_Response() {} +func (*BatchResponseItem_BucketDelete) isBatchResponseItem_Response() {} +func (*BatchResponseItem_BucketList) isBatchResponseItem_Response() {} +func (*BatchResponseItem_BucketSetAttribution) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectBegin) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectCommit) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectGet) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectList) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectBeginDelete) isBatchResponseItem_Response() {} +func (*BatchResponseItem_ObjectFinishDelete) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentBegin) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentCommit) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentMakeInline) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentBeginDelete) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentFinishDelete) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentList) isBatchResponseItem_Response() {} +func (*BatchResponseItem_SegmentDownload) isBatchResponseItem_Response() {} + +func (m *BatchResponseItem) GetResponse() isBatchResponseItem_Response { + if m != nil { + return m.Response + } + return nil +} + +func (m *BatchResponseItem) GetBucketCreate() *BucketCreateResponse { + 
if x, ok := m.GetResponse().(*BatchResponseItem_BucketCreate); ok { + return x.BucketCreate + } + return nil +} + +func (m *BatchResponseItem) GetBucketGet() *BucketGetResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_BucketGet); ok { + return x.BucketGet + } + return nil +} + +func (m *BatchResponseItem) GetBucketDelete() *BucketDeleteResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_BucketDelete); ok { + return x.BucketDelete + } + return nil +} + +func (m *BatchResponseItem) GetBucketList() *BucketListResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_BucketList); ok { + return x.BucketList + } + return nil +} + +func (m *BatchResponseItem) GetBucketSetAttribution() *BucketSetAttributionResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_BucketSetAttribution); ok { + return x.BucketSetAttribution + } + return nil +} + +func (m *BatchResponseItem) GetObjectBegin() *ObjectBeginResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectBegin); ok { + return x.ObjectBegin + } + return nil +} + +func (m *BatchResponseItem) GetObjectCommit() *ObjectCommitResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectCommit); ok { + return x.ObjectCommit + } + return nil +} + +func (m *BatchResponseItem) GetObjectGet() *ObjectGetResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectGet); ok { + return x.ObjectGet + } + return nil +} + +func (m *BatchResponseItem) GetObjectList() *ObjectListResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectList); ok { + return x.ObjectList + } + return nil +} + +func (m *BatchResponseItem) GetObjectBeginDelete() *ObjectBeginDeleteResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectBeginDelete); ok { + return x.ObjectBeginDelete + } + return nil +} + +func (m *BatchResponseItem) GetObjectFinishDelete() *ObjectFinishDeleteResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_ObjectFinishDelete); ok { + return x.ObjectFinishDelete + } + 
return nil +} + +func (m *BatchResponseItem) GetSegmentBegin() *SegmentBeginResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentBegin); ok { + return x.SegmentBegin + } + return nil +} + +func (m *BatchResponseItem) GetSegmentCommit() *SegmentCommitResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentCommit); ok { + return x.SegmentCommit + } + return nil +} + +func (m *BatchResponseItem) GetSegmentMakeInline() *SegmentMakeInlineResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentMakeInline); ok { + return x.SegmentMakeInline + } + return nil +} + +func (m *BatchResponseItem) GetSegmentBeginDelete() *SegmentBeginDeleteResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentBeginDelete); ok { + return x.SegmentBeginDelete + } + return nil +} + +func (m *BatchResponseItem) GetSegmentFinishDelete() *SegmentFinishDeleteResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentFinishDelete); ok { + return x.SegmentFinishDelete + } + return nil +} + +func (m *BatchResponseItem) GetSegmentList() *SegmentListResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentList); ok { + return x.SegmentList + } + return nil +} + +func (m *BatchResponseItem) GetSegmentDownload() *SegmentDownloadResponse { + if x, ok := m.GetResponse().(*BatchResponseItem_SegmentDownload); ok { + return x.SegmentDownload + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*BatchResponseItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _BatchResponseItem_OneofMarshaler, _BatchResponseItem_OneofUnmarshaler, _BatchResponseItem_OneofSizer, []interface{}{ + (*BatchResponseItem_BucketCreate)(nil), + (*BatchResponseItem_BucketGet)(nil), + (*BatchResponseItem_BucketDelete)(nil), + (*BatchResponseItem_BucketList)(nil), + (*BatchResponseItem_BucketSetAttribution)(nil), + (*BatchResponseItem_ObjectBegin)(nil), + (*BatchResponseItem_ObjectCommit)(nil), + (*BatchResponseItem_ObjectGet)(nil), + (*BatchResponseItem_ObjectList)(nil), + (*BatchResponseItem_ObjectBeginDelete)(nil), + (*BatchResponseItem_ObjectFinishDelete)(nil), + (*BatchResponseItem_SegmentBegin)(nil), + (*BatchResponseItem_SegmentCommit)(nil), + (*BatchResponseItem_SegmentMakeInline)(nil), + (*BatchResponseItem_SegmentBeginDelete)(nil), + (*BatchResponseItem_SegmentFinishDelete)(nil), + (*BatchResponseItem_SegmentList)(nil), + (*BatchResponseItem_SegmentDownload)(nil), + } +} + +func _BatchResponseItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*BatchResponseItem) + // Response + switch x := m.Response.(type) { + case *BatchResponseItem_BucketCreate: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketCreate); err != nil { + return err + } + case *BatchResponseItem_BucketGet: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketGet); err != nil { + return err + } + case *BatchResponseItem_BucketDelete: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketDelete); err != nil { + return err + } + case *BatchResponseItem_BucketList: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketList); err != nil { + return err + } + case *BatchResponseItem_BucketSetAttribution: + _ = 
b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BucketSetAttribution); err != nil { + return err + } + case *BatchResponseItem_ObjectBegin: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectBegin); err != nil { + return err + } + case *BatchResponseItem_ObjectCommit: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectCommit); err != nil { + return err + } + case *BatchResponseItem_ObjectGet: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectGet); err != nil { + return err + } + case *BatchResponseItem_ObjectList: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectList); err != nil { + return err + } + case *BatchResponseItem_ObjectBeginDelete: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectBeginDelete); err != nil { + return err + } + case *BatchResponseItem_ObjectFinishDelete: + _ = b.EncodeVarint(11<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ObjectFinishDelete); err != nil { + return err + } + case *BatchResponseItem_SegmentBegin: + _ = b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentBegin); err != nil { + return err + } + case *BatchResponseItem_SegmentCommit: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentCommit); err != nil { + return err + } + case *BatchResponseItem_SegmentMakeInline: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentMakeInline); err != nil { + return err + } + case *BatchResponseItem_SegmentBeginDelete: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentBeginDelete); err != nil { + return err + } + case *BatchResponseItem_SegmentFinishDelete: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentFinishDelete); err != nil { + return err + } + case *BatchResponseItem_SegmentList: + _ = 
b.EncodeVarint(17<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentList); err != nil { + return err + } + case *BatchResponseItem_SegmentDownload: + _ = b.EncodeVarint(18<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SegmentDownload); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("BatchResponseItem.Response has unexpected type %T", x) + } + return nil +} + +func _BatchResponseItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*BatchResponseItem) + switch tag { + case 1: // Response.bucket_create + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketCreateResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_BucketCreate{msg} + return true, err + case 2: // Response.bucket_get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketGetResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_BucketGet{msg} + return true, err + case 3: // Response.bucket_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketDeleteResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_BucketDelete{msg} + return true, err + case 4: // Response.bucket_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketListResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_BucketList{msg} + return true, err + case 5: // Response.bucket_set_attribution + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BucketSetAttributionResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_BucketSetAttribution{msg} + return true, err + case 6: // Response.object_begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectBeginResponse) + err := 
b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectBegin{msg} + return true, err + case 7: // Response.object_commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectCommitResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectCommit{msg} + return true, err + case 8: // Response.object_get + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectGetResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectGet{msg} + return true, err + case 9: // Response.object_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectListResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectList{msg} + return true, err + case 10: // Response.object_begin_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectBeginDeleteResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectBeginDelete{msg} + return true, err + case 11: // Response.object_finish_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ObjectFinishDeleteResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_ObjectFinishDelete{msg} + return true, err + case 12: // Response.segment_begin + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentBeginResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentBegin{msg} + return true, err + case 13: // Response.segment_commit + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentCommitResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentCommit{msg} + return true, err + case 14: // Response.segment_make_inline + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + 
msg := new(SegmentMakeInlineResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentMakeInline{msg} + return true, err + case 15: // Response.segment_begin_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentBeginDeleteResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentBeginDelete{msg} + return true, err + case 16: // Response.segment_finish_delete + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentFinishDeleteResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentFinishDelete{msg} + return true, err + case 17: // Response.segment_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentListResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentList{msg} + return true, err + case 18: // Response.segment_download + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SegmentDownloadResponse) + err := b.DecodeMessage(msg) + m.Response = &BatchResponseItem_SegmentDownload{msg} + return true, err + default: + return false, nil + } +} + +func _BatchResponseItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*BatchResponseItem) + // Response + switch x := m.Response.(type) { + case *BatchResponseItem_BucketCreate: + s := proto.Size(x.BucketCreate) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_BucketGet: + s := proto.Size(x.BucketGet) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_BucketDelete: + s := proto.Size(x.BucketDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_BucketList: + s := proto.Size(x.BucketList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_BucketSetAttribution: + s 
:= proto.Size(x.BucketSetAttribution) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectBegin: + s := proto.Size(x.ObjectBegin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectCommit: + s := proto.Size(x.ObjectCommit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectGet: + s := proto.Size(x.ObjectGet) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectList: + s := proto.Size(x.ObjectList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectBeginDelete: + s := proto.Size(x.ObjectBeginDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_ObjectFinishDelete: + s := proto.Size(x.ObjectFinishDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentBegin: + s := proto.Size(x.SegmentBegin) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentCommit: + s := proto.Size(x.SegmentCommit) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentMakeInline: + s := proto.Size(x.SegmentMakeInline) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentBeginDelete: + s := proto.Size(x.SegmentBeginDelete) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentFinishDelete: + s := proto.Size(x.SegmentFinishDelete) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentList: + s := proto.Size(x.SegmentList) + n += 2 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *BatchResponseItem_SegmentDownload: + s := proto.Size(x.SegmentDownload) + n += 2 // tag and wire + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterEnum("metainfo.Object_Status", Object_Status_name, Object_Status_value) + proto.RegisterType((*RequestHeader)(nil), "metainfo.RequestHeader") + proto.RegisterType((*Bucket)(nil), "metainfo.Bucket") + proto.RegisterType((*BucketListItem)(nil), "metainfo.BucketListItem") + proto.RegisterType((*BucketCreateRequest)(nil), "metainfo.BucketCreateRequest") + proto.RegisterType((*BucketCreateResponse)(nil), "metainfo.BucketCreateResponse") + proto.RegisterType((*BucketGetRequest)(nil), "metainfo.BucketGetRequest") + proto.RegisterType((*BucketGetResponse)(nil), "metainfo.BucketGetResponse") + proto.RegisterType((*BucketDeleteRequest)(nil), "metainfo.BucketDeleteRequest") + proto.RegisterType((*BucketDeleteResponse)(nil), "metainfo.BucketDeleteResponse") + proto.RegisterType((*BucketListRequest)(nil), "metainfo.BucketListRequest") + proto.RegisterType((*BucketListResponse)(nil), "metainfo.BucketListResponse") + proto.RegisterType((*BucketSetAttributionRequest)(nil), "metainfo.BucketSetAttributionRequest") + proto.RegisterType((*BucketSetAttributionResponse)(nil), "metainfo.BucketSetAttributionResponse") + proto.RegisterType((*AddressedOrderLimit)(nil), "metainfo.AddressedOrderLimit") + proto.RegisterType((*SegmentWriteRequestOld)(nil), "metainfo.SegmentWriteRequestOld") + proto.RegisterType((*SegmentWriteResponseOld)(nil), "metainfo.SegmentWriteResponseOld") + proto.RegisterType((*SegmentCommitRequestOld)(nil), "metainfo.SegmentCommitRequestOld") + proto.RegisterType((*SegmentCommitResponseOld)(nil), "metainfo.SegmentCommitResponseOld") + proto.RegisterType((*SegmentDownloadRequestOld)(nil), "metainfo.SegmentDownloadRequestOld") + proto.RegisterType((*SegmentDownloadResponseOld)(nil), "metainfo.SegmentDownloadResponseOld") + proto.RegisterType((*SegmentInfoRequestOld)(nil), 
"metainfo.SegmentInfoRequestOld") + proto.RegisterType((*SegmentInfoResponseOld)(nil), "metainfo.SegmentInfoResponseOld") + proto.RegisterType((*SegmentDeleteRequestOld)(nil), "metainfo.SegmentDeleteRequestOld") + proto.RegisterType((*SegmentDeleteResponseOld)(nil), "metainfo.SegmentDeleteResponseOld") + proto.RegisterType((*ListSegmentsRequestOld)(nil), "metainfo.ListSegmentsRequestOld") + proto.RegisterType((*ListSegmentsResponseOld)(nil), "metainfo.ListSegmentsResponseOld") + proto.RegisterType((*ListSegmentsResponseOld_Item)(nil), "metainfo.ListSegmentsResponseOld.Item") + proto.RegisterType((*SetAttributionRequestOld)(nil), "metainfo.SetAttributionRequestOld") + proto.RegisterType((*SetAttributionResponseOld)(nil), "metainfo.SetAttributionResponseOld") + proto.RegisterType((*ProjectInfoRequest)(nil), "metainfo.ProjectInfoRequest") + proto.RegisterType((*ProjectInfoResponse)(nil), "metainfo.ProjectInfoResponse") + proto.RegisterType((*Object)(nil), "metainfo.Object") + proto.RegisterType((*ObjectBeginRequest)(nil), "metainfo.ObjectBeginRequest") + proto.RegisterType((*ObjectBeginResponse)(nil), "metainfo.ObjectBeginResponse") + proto.RegisterType((*ObjectCommitRequest)(nil), "metainfo.ObjectCommitRequest") + proto.RegisterType((*ObjectCommitResponse)(nil), "metainfo.ObjectCommitResponse") + proto.RegisterType((*ObjectGetRequest)(nil), "metainfo.ObjectGetRequest") + proto.RegisterType((*ObjectGetResponse)(nil), "metainfo.ObjectGetResponse") + proto.RegisterType((*ObjectListRequest)(nil), "metainfo.ObjectListRequest") + proto.RegisterType((*ObjectListResponse)(nil), "metainfo.ObjectListResponse") + proto.RegisterType((*ObjectListItem)(nil), "metainfo.ObjectListItem") + proto.RegisterType((*ObjectListItemIncludes)(nil), "metainfo.ObjectListItemIncludes") + proto.RegisterType((*ObjectBeginDeleteRequest)(nil), "metainfo.ObjectBeginDeleteRequest") + proto.RegisterType((*ObjectBeginDeleteResponse)(nil), "metainfo.ObjectBeginDeleteResponse") + 
proto.RegisterType((*ObjectFinishDeleteRequest)(nil), "metainfo.ObjectFinishDeleteRequest") + proto.RegisterType((*ObjectFinishDeleteResponse)(nil), "metainfo.ObjectFinishDeleteResponse") + proto.RegisterType((*SatStreamID)(nil), "metainfo.SatStreamID") + proto.RegisterType((*Segment)(nil), "metainfo.Segment") + proto.RegisterType((*Piece)(nil), "metainfo.Piece") + proto.RegisterType((*SegmentPosition)(nil), "metainfo.SegmentPosition") + proto.RegisterType((*SegmentBeginRequest)(nil), "metainfo.SegmentBeginRequest") + proto.RegisterType((*SegmentBeginResponse)(nil), "metainfo.SegmentBeginResponse") + proto.RegisterType((*SegmentCommitRequest)(nil), "metainfo.SegmentCommitRequest") + proto.RegisterType((*SegmentPieceUploadResult)(nil), "metainfo.SegmentPieceUploadResult") + proto.RegisterType((*SatSegmentID)(nil), "metainfo.SatSegmentID") + proto.RegisterType((*SegmentCommitResponse)(nil), "metainfo.SegmentCommitResponse") + proto.RegisterType((*SegmentMakeInlineRequest)(nil), "metainfo.SegmentMakeInlineRequest") + proto.RegisterType((*SegmentMakeInlineResponse)(nil), "metainfo.SegmentMakeInlineResponse") + proto.RegisterType((*SegmentBeginDeleteRequest)(nil), "metainfo.SegmentBeginDeleteRequest") + proto.RegisterType((*SegmentBeginDeleteResponse)(nil), "metainfo.SegmentBeginDeleteResponse") + proto.RegisterType((*SegmentFinishDeleteRequest)(nil), "metainfo.SegmentFinishDeleteRequest") + proto.RegisterType((*SegmentPieceDeleteResult)(nil), "metainfo.SegmentPieceDeleteResult") + proto.RegisterType((*SegmentFinishDeleteResponse)(nil), "metainfo.SegmentFinishDeleteResponse") + proto.RegisterType((*SegmentListRequest)(nil), "metainfo.SegmentListRequest") + proto.RegisterType((*SegmentListResponse)(nil), "metainfo.SegmentListResponse") + proto.RegisterType((*SegmentListItem)(nil), "metainfo.SegmentListItem") + proto.RegisterType((*SegmentDownloadRequest)(nil), "metainfo.SegmentDownloadRequest") + proto.RegisterType((*SegmentDownloadResponse)(nil), 
"metainfo.SegmentDownloadResponse") + proto.RegisterType((*BatchRequest)(nil), "metainfo.BatchRequest") + proto.RegisterType((*BatchRequestItem)(nil), "metainfo.BatchRequestItem") + proto.RegisterType((*BatchResponse)(nil), "metainfo.BatchResponse") + proto.RegisterType((*BatchResponseItem)(nil), "metainfo.BatchResponseItem") +} + +func init() { proto.RegisterFile("metainfo.proto", fileDescriptor_631e2f30a93cd64e) } + +var fileDescriptor_631e2f30a93cd64e = []byte{ + // 3749 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5b, 0x4d, 0x6c, 0x1c, 0xc7, + 0x95, 0xe6, 0xcc, 0x70, 0xfe, 0xde, 0x0c, 0x39, 0xc3, 0x22, 0x45, 0x8e, 0x9a, 0xa2, 0x28, 0xb5, + 0x24, 0x5b, 0x8b, 0xb5, 0x29, 0x83, 0xde, 0x5d, 0x78, 0x21, 0x79, 0xbd, 0xa4, 0x86, 0x16, 0xc7, + 0x12, 0x29, 0xba, 0x29, 0x59, 0x5a, 0xad, 0x77, 0x1b, 0xcd, 0xe9, 0x22, 0xd9, 0xd6, 0x4c, 0xf7, + 0xa4, 0xbb, 0xc7, 0xa6, 0x9c, 0x53, 0x00, 0x03, 0x41, 0x10, 0x20, 0x30, 0x72, 0x0e, 0x72, 0xf2, + 0x2d, 0xa7, 0xe4, 0x16, 0x20, 0xc9, 0x35, 0x09, 0x10, 0x38, 0x80, 0x11, 0xe4, 0x90, 0x83, 0x93, + 0x53, 0x6e, 0xc9, 0x25, 0xb7, 0x04, 0x01, 0x82, 0xfa, 0xeb, 0xff, 0x9e, 0x1f, 0xfe, 0x48, 0xf1, + 0xad, 0xfb, 0x55, 0xd5, 0xeb, 0xaa, 0xef, 0xbd, 0xfa, 0xde, 0xeb, 0x57, 0xdd, 0x30, 0xdd, 0xc5, + 0xae, 0x66, 0x98, 0xfb, 0xd6, 0x4a, 0xcf, 0xb6, 0x5c, 0x0b, 0x95, 0xc4, 0xbd, 0x54, 0xc7, 0x66, + 0xdb, 0x7e, 0xd6, 0x73, 0x0d, 0xcb, 0x64, 0x6d, 0x12, 0x1c, 0x58, 0x07, 0xbc, 0x9f, 0xb4, 0x7c, + 0x60, 0x59, 0x07, 0x1d, 0x7c, 0x83, 0xde, 0xed, 0xf5, 0xf7, 0x6f, 0xb8, 0x46, 0x17, 0x3b, 0xae, + 0xd6, 0xed, 0x89, 0xce, 0xa6, 0xa5, 0x63, 0x7e, 0x5d, 0xeb, 0x59, 0x86, 0xe9, 0x62, 0x5b, 0xdf, + 0xe3, 0x82, 0xaa, 0x65, 0xeb, 0xd8, 0x76, 0xd8, 0x9d, 0x7c, 0x07, 0xa6, 0x14, 0xfc, 0xb5, 0x3e, + 0x76, 0xdc, 0x4d, 0xac, 0xe9, 0xd8, 0x46, 0x0b, 0x50, 0xd4, 0x7a, 0x86, 0xfa, 0x14, 0x3f, 0x6b, + 0x64, 0x2e, 0x65, 0xae, 0x57, 0x95, 0x82, 0xd6, 0x33, 0xee, 0xe2, 0x67, 0x68, 0x09, 0xa0, 0xef, + 0x60, 
0x5b, 0xd5, 0x0e, 0xb0, 0xe9, 0x36, 0xb2, 0xb4, 0xad, 0x4c, 0x24, 0x6b, 0x44, 0x20, 0xff, + 0x20, 0x07, 0x85, 0xf5, 0x7e, 0xfb, 0x29, 0x76, 0x11, 0x82, 0x49, 0x53, 0xeb, 0x62, 0x3e, 0x9e, + 0x5e, 0xa3, 0x37, 0xa0, 0xd2, 0xd3, 0xdc, 0x43, 0xb5, 0x6d, 0xf4, 0x0e, 0xb1, 0x4d, 0x87, 0x4f, + 0xaf, 0x2e, 0xac, 0x04, 0xd6, 0x79, 0x9b, 0xb6, 0xec, 0xf6, 0x0d, 0x17, 0x2b, 0x40, 0xfa, 0x32, + 0x01, 0xba, 0x0d, 0xd0, 0xb6, 0xb1, 0xe6, 0x62, 0x5d, 0xd5, 0xdc, 0x46, 0xee, 0x52, 0xe6, 0x7a, + 0x65, 0x55, 0x5a, 0x61, 0x10, 0xac, 0x08, 0x08, 0x56, 0x1e, 0x08, 0x08, 0xd6, 0x4b, 0xbf, 0xf8, + 0x72, 0x79, 0xe2, 0xd3, 0xdf, 0x2f, 0x67, 0x94, 0x32, 0x1f, 0xb7, 0xe6, 0xa2, 0xd7, 0x60, 0x4e, + 0xc7, 0xfb, 0x5a, 0xbf, 0xe3, 0xaa, 0x0e, 0x3e, 0xe8, 0x62, 0xd3, 0x55, 0x1d, 0xe3, 0x63, 0xdc, + 0x98, 0xbc, 0x94, 0xb9, 0x9e, 0x53, 0x10, 0x6f, 0xdb, 0x65, 0x4d, 0xbb, 0xc6, 0xc7, 0x18, 0x3d, + 0x82, 0xf3, 0x62, 0x84, 0x8d, 0xf5, 0xbe, 0xa9, 0x6b, 0x66, 0xfb, 0x99, 0xea, 0xb4, 0x0f, 0x71, + 0x17, 0x37, 0xf2, 0x74, 0x16, 0x8b, 0x2b, 0x3e, 0xb6, 0x8a, 0xd7, 0x67, 0x97, 0x76, 0x51, 0x16, + 0xf8, 0xe8, 0x68, 0x03, 0xd2, 0x61, 0x49, 0x28, 0xf6, 0x57, 0xaf, 0xf6, 0x34, 0x5b, 0xeb, 0x62, + 0x17, 0xdb, 0x4e, 0xa3, 0x40, 0x95, 0x5f, 0x0a, 0x62, 0xb3, 0xe1, 0x5d, 0xee, 0x78, 0xfd, 0x94, + 0x45, 0xae, 0x26, 0xa9, 0x91, 0x58, 0xab, 0xa7, 0xd9, 0xae, 0x89, 0x6d, 0xd5, 0xd0, 0x1b, 0x45, + 0x66, 0x2d, 0x2e, 0x69, 0xe9, 0xb2, 0x01, 0xd3, 0xcc, 0x58, 0xf7, 0x0c, 0xc7, 0x6d, 0xb9, 0xb8, + 0x9b, 0x68, 0xb4, 0x30, 0xf4, 0xd9, 0x63, 0x41, 0x2f, 0x7f, 0x96, 0x83, 0x59, 0xf6, 0xac, 0xdb, + 0x54, 0xc6, 0xdd, 0x0d, 0xdd, 0x80, 0xc2, 0x21, 0x75, 0xb9, 0x46, 0x8d, 0x2a, 0x5e, 0x58, 0xf1, + 0xb6, 0x43, 0xc8, 0x23, 0x15, 0xde, 0xed, 0x94, 0xdd, 0x2a, 0xcd, 0x23, 0x72, 0xc7, 0xf3, 0x88, + 0xc9, 0xb3, 0xf4, 0x88, 0xfc, 0xe9, 0x7b, 0x44, 0x21, 0xea, 0x11, 0xff, 0x0d, 0x73, 0x61, 0x2b, + 0x39, 0x3d, 0xcb, 0x74, 0x30, 0xba, 0x0e, 0x85, 0x3d, 0x2a, 0xa7, 0xb8, 0x57, 0x56, 0xeb, 0xbe, + 0x99, 0x58, 0x7f, 0x85, 0xb7, 0xcb, 0x8f, 
0xa0, 0xce, 0x24, 0x77, 0xb0, 0x7b, 0x9a, 0x46, 0x96, + 0xdf, 0x84, 0x99, 0x80, 0xe2, 0xb1, 0xe7, 0xf5, 0x44, 0xf8, 0x5f, 0x13, 0x77, 0xf0, 0xe9, 0xfa, + 0x9f, 0x8f, 0x9a, 0xd0, 0x3d, 0xf6, 0xec, 0x3e, 0xcd, 0x88, 0xd5, 0x91, 0xad, 0x78, 0xec, 0xc9, + 0xcd, 0x43, 0xa1, 0xdd, 0xb7, 0x1d, 0xcb, 0x16, 0xac, 0xcd, 0xee, 0xd0, 0x1c, 0xe4, 0x3b, 0x46, + 0xd7, 0x60, 0xbb, 0x37, 0xaf, 0xb0, 0x1b, 0x74, 0x01, 0xca, 0xba, 0x61, 0xe3, 0x36, 0x71, 0x11, + 0xea, 0xf1, 0x79, 0xc5, 0x17, 0xc8, 0x8f, 0x01, 0x05, 0x67, 0xc4, 0x97, 0xb4, 0x02, 0x79, 0xc3, + 0xc5, 0x5d, 0xa7, 0x91, 0xb9, 0x94, 0xbb, 0x5e, 0x59, 0x6d, 0x44, 0x57, 0x24, 0x98, 0x44, 0x61, + 0xdd, 0x08, 0x5c, 0x5d, 0xcb, 0xc6, 0xf4, 0xc1, 0x25, 0x85, 0x5e, 0xcb, 0xdf, 0xc8, 0xc0, 0x22, + 0xeb, 0xbd, 0x8b, 0xdd, 0x35, 0xd7, 0xb5, 0x8d, 0xbd, 0x3e, 0x79, 0xe4, 0xa9, 0x72, 0x42, 0xd8, + 0xd1, 0xb3, 0x51, 0x47, 0xbf, 0x08, 0x17, 0x92, 0xa7, 0xc0, 0xd6, 0x29, 0x7f, 0x92, 0x81, 0xd9, + 0x35, 0x5d, 0xb7, 0xb1, 0xe3, 0x60, 0xfd, 0x3e, 0x89, 0x95, 0xf7, 0x28, 0x66, 0xd7, 0x05, 0x92, + 0xcc, 0xa2, 0x68, 0x85, 0xc7, 0x51, 0xbf, 0x8b, 0x40, 0xf7, 0x36, 0xcc, 0x39, 0xae, 0x65, 0x6b, + 0x07, 0x58, 0x25, 0x81, 0x58, 0xd5, 0x98, 0x36, 0x4e, 0xa0, 0x33, 0x2b, 0x34, 0x3a, 0x6f, 0x5b, + 0x3a, 0xe6, 0x8f, 0x51, 0x10, 0xef, 0x1e, 0x90, 0xc9, 0xbf, 0xce, 0xc2, 0x3c, 0x67, 0x9f, 0x47, + 0xb6, 0xe1, 0xb9, 0xed, 0xfd, 0x8e, 0x7e, 0x2c, 0xe7, 0x08, 0x78, 0x63, 0x55, 0xf8, 0x1e, 0x41, + 0x8f, 0x30, 0x22, 0xc7, 0x88, 0x5e, 0xa3, 0x06, 0x14, 0x39, 0x1f, 0x72, 0x2a, 0x14, 0xb7, 0xe8, + 0x26, 0x80, 0xcf, 0x7b, 0xa3, 0x10, 0x5e, 0xa0, 0x3b, 0xba, 0x09, 0x52, 0x57, 0x3b, 0x12, 0xfc, + 0x86, 0xf5, 0x30, 0xe9, 0xe6, 0xe9, 0x93, 0x16, 0xba, 0xda, 0xd1, 0x86, 0xe8, 0x10, 0x64, 0xde, + 0x26, 0x00, 0x3e, 0xea, 0x19, 0xb6, 0x46, 0xfd, 0xb5, 0x30, 0x46, 0x1c, 0x0a, 0x8c, 0x93, 0xbf, + 0xc8, 0xc0, 0x42, 0x18, 0x51, 0x66, 0x71, 0x02, 0xe9, 0x26, 0xd4, 0x35, 0x61, 0x73, 0x95, 0x5a, + 0x51, 0xf8, 0xf9, 0x92, 0x0f, 0x6e, 0x82, 0x57, 0x28, 0x35, 0x6f, 0x18, 0xbd, 
0x77, 0xd0, 0xeb, + 0x30, 0x65, 0x5b, 0x96, 0xab, 0xf6, 0x0c, 0xdc, 0xc6, 0x9e, 0x03, 0xae, 0xd7, 0xc8, 0x94, 0x7e, + 0xf7, 0xe5, 0x72, 0x71, 0x87, 0xc8, 0x5b, 0x4d, 0xa5, 0x42, 0x7a, 0xb1, 0x1b, 0x9d, 0x86, 0x31, + 0xdb, 0xf8, 0x50, 0x73, 0x31, 0x4d, 0xbc, 0x72, 0x74, 0xc8, 0x02, 0x1f, 0x52, 0xa3, 0xbd, 0x76, + 0x58, 0xfb, 0x5d, 0xfc, 0x4c, 0x81, 0x9e, 0x77, 0x2d, 0xff, 0xcd, 0x5f, 0xd4, 0x6d, 0xab, 0x4b, + 0x66, 0xf4, 0xc2, 0xfd, 0xe4, 0x15, 0x28, 0x72, 0xa7, 0xe0, 0x4e, 0x82, 0x02, 0x4e, 0xb2, 0xc3, + 0xae, 0x14, 0xd1, 0x05, 0xdd, 0x84, 0x9a, 0x65, 0x1b, 0x07, 0x86, 0xa9, 0x75, 0x04, 0xf0, 0x79, + 0x0a, 0x7c, 0xd2, 0x06, 0x9b, 0x16, 0x5d, 0x19, 0xd8, 0xf2, 0x26, 0x34, 0x22, 0x8b, 0xf7, 0x4d, + 0x1a, 0x98, 0x46, 0x66, 0xe8, 0x34, 0xe4, 0xef, 0x66, 0xe0, 0x3c, 0x57, 0xd5, 0xb4, 0x3e, 0x32, + 0x3b, 0x96, 0xa6, 0xbf, 0x70, 0x24, 0xe5, 0xcf, 0x33, 0x20, 0xc5, 0x26, 0x75, 0x16, 0x4e, 0x1b, + 0xc0, 0x2a, 0x3b, 0xdc, 0x64, 0xc7, 0xf7, 0xd6, 0xef, 0x64, 0xe0, 0x1c, 0x5f, 0x50, 0xcb, 0xdc, + 0xb7, 0x5e, 0x3c, 0xc2, 0x6f, 0x7b, 0x24, 0xcb, 0xe6, 0x93, 0xe8, 0x3e, 0xc3, 0x21, 0x21, 0x51, + 0x5c, 0x6c, 0xc3, 0x50, 0x96, 0xf1, 0x02, 0x97, 0xf6, 0xfd, 0x8c, 0xb7, 0x39, 0xc2, 0xc9, 0xc9, + 0xe9, 0xba, 0x4e, 0xc4, 0x19, 0xb2, 0xa3, 0x3b, 0xc3, 0xa7, 0x59, 0x98, 0x27, 0x49, 0x03, 0x9f, + 0xa4, 0x73, 0x16, 0x90, 0xcd, 0x43, 0xa1, 0x67, 0xe3, 0x7d, 0xe3, 0x88, 0x83, 0xc6, 0xef, 0xd0, + 0x32, 0x54, 0x1c, 0x57, 0xb3, 0x5d, 0x55, 0xdb, 0x27, 0x16, 0xa6, 0x2e, 0xac, 0x00, 0x15, 0xad, + 0x11, 0x09, 0xba, 0x0c, 0x80, 0x4d, 0x5d, 0xdd, 0xc3, 0xfb, 0x24, 0x87, 0x99, 0xa4, 0xab, 0xca, + 0x36, 0x32, 0x4a, 0x19, 0x9b, 0xfa, 0x3a, 0x15, 0x92, 0x24, 0xca, 0xc6, 0x24, 0xcd, 0x32, 0x3e, + 0x64, 0x11, 0xac, 0xa4, 0xf8, 0x02, 0x3f, 0xf1, 0x2a, 0x04, 0x13, 0xaf, 0x25, 0x00, 0xb2, 0x12, + 0x75, 0xbf, 0xa3, 0x1d, 0x38, 0xf4, 0xb5, 0xac, 0xa8, 0x94, 0x89, 0xe4, 0x6d, 0x22, 0xa0, 0x21, + 0x2a, 0x0c, 0x89, 0x6f, 0xb2, 0x5b, 0xe1, 0xfc, 0xeb, 0x25, 0x1f, 0x92, 0x94, 0x11, 0x2b, 0x43, + 0xb2, 0x31, 0x09, 
0xc3, 0xa4, 0x78, 0xf5, 0xa3, 0x7e, 0x95, 0x09, 0xf8, 0xd5, 0x78, 0x8c, 0xb0, + 0x08, 0x65, 0xc3, 0x51, 0x39, 0xd2, 0x39, 0xfa, 0x88, 0x92, 0xe1, 0xec, 0xd0, 0x7b, 0xf9, 0xdb, + 0xd4, 0x11, 0x13, 0xd2, 0xbd, 0x63, 0x59, 0x7a, 0x19, 0x2a, 0xcc, 0xb6, 0x6a, 0x20, 0xf1, 0x03, + 0x26, 0xda, 0x1e, 0x21, 0xfd, 0x5b, 0x24, 0x3c, 0x9f, 0x94, 0xf8, 0xdd, 0xef, 0xe8, 0xf2, 0x06, + 0xa0, 0x1d, 0xdb, 0xfa, 0x00, 0xb7, 0x83, 0xf4, 0x34, 0xf6, 0x1c, 0xe5, 0x37, 0x60, 0x36, 0xa4, + 0x86, 0x67, 0xd0, 0x97, 0xa1, 0xda, 0x63, 0x62, 0xd5, 0xd1, 0x3a, 0xc2, 0x55, 0x2b, 0x5c, 0xb6, + 0xab, 0x75, 0x5c, 0xf9, 0x5b, 0x45, 0x28, 0xdc, 0xdf, 0x23, 0xb7, 0xa9, 0x2e, 0x7d, 0x0d, 0xa6, + 0xfd, 0x2c, 0x2a, 0xc0, 0x07, 0x53, 0x9e, 0x74, 0x87, 0x13, 0xc3, 0x87, 0xd8, 0x76, 0xfc, 0x04, + 0x5f, 0xdc, 0x92, 0xe5, 0x38, 0xae, 0xe6, 0xf6, 0x1d, 0xea, 0xd6, 0xd3, 0xc1, 0xe5, 0xb0, 0x47, + 0xaf, 0xec, 0xd2, 0x66, 0x85, 0x77, 0x43, 0xaf, 0x42, 0xd9, 0x71, 0x6d, 0xac, 0x75, 0x09, 0xa0, + 0x79, 0xba, 0x15, 0xea, 0x7c, 0x83, 0x97, 0x76, 0x69, 0x43, 0xab, 0xa9, 0x94, 0x58, 0x97, 0x96, + 0x1e, 0xa9, 0x1a, 0x14, 0x8e, 0x57, 0xb0, 0x59, 0x23, 0xcf, 0x24, 0x4f, 0x27, 0x3a, 0x8a, 0x63, + 0xe8, 0x28, 0xb1, 0x61, 0x6b, 0x24, 0x0d, 0x67, 0xd9, 0x1f, 0xa6, 0x3a, 0x4a, 0xe3, 0xcc, 0x83, + 0x8f, 0x5b, 0x73, 0xd1, 0x1d, 0x68, 0xf8, 0x68, 0x13, 0x9c, 0x74, 0xcd, 0xd5, 0x54, 0xd3, 0x32, + 0xdb, 0xb8, 0x51, 0xa6, 0x50, 0x4c, 0x71, 0x28, 0xf2, 0xdb, 0x44, 0xa8, 0xcc, 0x7b, 0xdd, 0xb7, + 0x78, 0x6f, 0x2a, 0x47, 0xaf, 0x02, 0x8a, 0x2b, 0x6a, 0x00, 0x35, 0xdd, 0x4c, 0x6c, 0x0c, 0x7a, + 0x05, 0xd0, 0xbe, 0x71, 0x14, 0xcd, 0x93, 0x2b, 0x94, 0xe2, 0xeb, 0xb4, 0x25, 0x98, 0x20, 0x6f, + 0xc2, 0x4c, 0xbc, 0x24, 0x51, 0x1d, 0x9e, 0xa1, 0xd7, 0xed, 0x68, 0x2d, 0xe2, 0x21, 0x9c, 0x4b, + 0xae, 0x41, 0x4c, 0x8d, 0x58, 0x83, 0x98, 0xc3, 0x29, 0xc5, 0x07, 0xd7, 0x72, 0xb5, 0x0e, 0x5b, + 0xc6, 0x34, 0x5d, 0x46, 0x99, 0x4a, 0xe8, 0xfc, 0x97, 0xa1, 0x62, 0x98, 0x1d, 0xc3, 0xc4, 0xac, + 0xbd, 0x46, 0xdb, 0x81, 0x89, 0x44, 0x07, 0x1b, 0x77, 
0x2d, 0x97, 0x77, 0xa8, 0xb3, 0x0e, 0x4c, + 0x44, 0x3a, 0xc8, 0xef, 0x42, 0x81, 0x79, 0x2d, 0xaa, 0x40, 0xb1, 0xb5, 0xfd, 0xde, 0xda, 0xbd, + 0x56, 0xb3, 0x3e, 0x81, 0xa6, 0xa0, 0xfc, 0x70, 0xe7, 0xde, 0xfd, 0xb5, 0x66, 0x6b, 0xfb, 0x4e, + 0x3d, 0x83, 0xa6, 0x01, 0x6e, 0xdf, 0xdf, 0xda, 0x6a, 0x3d, 0x78, 0x40, 0xee, 0xb3, 0xa4, 0x99, + 0xdf, 0x6f, 0x34, 0xeb, 0x39, 0x54, 0x85, 0x52, 0x73, 0xe3, 0xde, 0x06, 0x6d, 0x9c, 0x94, 0x3f, + 0xc9, 0x01, 0x62, 0x1b, 0x62, 0x1d, 0x1f, 0x18, 0xe6, 0x49, 0x5e, 0xcd, 0xcf, 0x66, 0x23, 0x87, + 0x1d, 0x7c, 0xf2, 0x78, 0x0e, 0x9e, 0xe8, 0x3a, 0xc5, 0x53, 0x75, 0x9d, 0xd2, 0x49, 0x5c, 0x47, + 0xfe, 0x59, 0x16, 0x66, 0x43, 0x66, 0xe0, 0x6c, 0x7a, 0x66, 0xb0, 0x86, 0xe8, 0x6e, 0x72, 0x28, + 0xdd, 0x25, 0x02, 0x98, 0x3f, 0x55, 0x00, 0x0b, 0x27, 0x02, 0xf0, 0x4f, 0x19, 0x01, 0x60, 0xe8, + 0x0d, 0x71, 0x7c, 0x47, 0x0e, 0x01, 0x93, 0x19, 0x0a, 0xcc, 0x20, 0xea, 0xcc, 0x9e, 0x9c, 0x3a, + 0x73, 0x29, 0xd4, 0x29, 0xcf, 0xc3, 0x5c, 0x78, 0xb9, 0xbc, 0xb0, 0xf3, 0xbd, 0x0c, 0xd4, 0x59, + 0xc3, 0x49, 0x0a, 0x94, 0x67, 0xe5, 0x76, 0xf2, 0x9b, 0x30, 0x13, 0x98, 0x9d, 0x5f, 0x47, 0xb4, + 0xa8, 0x30, 0x5e, 0x47, 0x64, 0x9d, 0x15, 0xde, 0x2e, 0xff, 0x30, 0x2b, 0xc6, 0x9f, 0xb4, 0x8e, + 0x98, 0xb8, 0xbc, 0x7f, 0x81, 0x7a, 0x60, 0x79, 0xc1, 0x94, 0xba, 0xe6, 0x2f, 0x90, 0xe5, 0xd6, + 0xa1, 0xae, 0xbc, 0x28, 0x99, 0x8b, 0x74, 0xbd, 0xcd, 0xaa, 0x93, 0xa1, 0x14, 0x7a, 0x32, 0x35, + 0x85, 0xce, 0x07, 0x53, 0xe8, 0x16, 0xd4, 0xd8, 0x92, 0x55, 0xc3, 0x6c, 0x77, 0xfa, 0x3a, 0xf6, + 0xf7, 0x47, 0x04, 0x1b, 0x51, 0x91, 0x6c, 0xf1, 0x7e, 0xca, 0x34, 0x1b, 0x28, 0xee, 0xe5, 0xc7, + 0x82, 0xe0, 0x47, 0x2c, 0x74, 0x86, 0xd5, 0x0e, 0x2a, 0x74, 0xfe, 0x3c, 0x07, 0xd3, 0xe1, 0xde, + 0x09, 0x0e, 0x92, 0x19, 0xe2, 0x20, 0xd9, 0xb4, 0xbc, 0x2d, 0x37, 0x5a, 0xde, 0x16, 0x4e, 0xc4, + 0x26, 0x4f, 0x21, 0x11, 0xcb, 0x9f, 0x42, 0x22, 0x56, 0x38, 0xfd, 0x44, 0xac, 0x78, 0x72, 0x36, + 0x29, 0xa5, 0xb1, 0xc9, 0xbf, 0xc1, 0x7c, 0xb2, 0x37, 0x21, 0x09, 0x4a, 0xde, 0xf0, 0x0c, 
0x7b, + 0xe7, 0x11, 0xf7, 0xf2, 0x67, 0x19, 0x68, 0x04, 0x82, 0xd6, 0x09, 0x4f, 0x1e, 0xce, 0x8c, 0x73, + 0x5c, 0x38, 0x9f, 0x30, 0x4b, 0xbe, 0x0f, 0xc6, 0xa4, 0x7b, 0x9f, 0xaa, 0xb2, 0x43, 0xa8, 0xea, + 0xeb, 0xe2, 0xa9, 0x6f, 0x1b, 0xa6, 0xe1, 0x1c, 0x9e, 0x10, 0x9c, 0xf1, 0xa6, 0x29, 0x5f, 0x00, + 0x29, 0xe9, 0xe1, 0x3c, 0x46, 0xfc, 0x39, 0x0b, 0x95, 0x5d, 0xcd, 0x15, 0xe3, 0xce, 0x2e, 0xc9, + 0x38, 0x51, 0x31, 0xbd, 0x05, 0x53, 0x74, 0x83, 0x92, 0x34, 0x41, 0xd7, 0x5c, 0x3c, 0xd6, 0xbe, + 0xac, 0x8a, 0xa1, 0x4d, 0xcd, 0xc5, 0x68, 0x0b, 0x6a, 0x7e, 0x89, 0x9c, 0x29, 0x1b, 0x67, 0x83, + 0x4e, 0xfb, 0x83, 0xa9, 0xba, 0x1b, 0x30, 0xeb, 0x68, 0x2e, 0xee, 0x74, 0x0c, 0x9a, 0xaa, 0x1f, + 0x98, 0x9a, 0xdb, 0xb7, 0xf9, 0x9b, 0x92, 0x82, 0xbc, 0xa6, 0x5d, 0xd1, 0x22, 0xff, 0x21, 0x0b, + 0x45, 0xfe, 0x26, 0x33, 0xae, 0xc3, 0xfd, 0x3b, 0x94, 0x7a, 0x96, 0x63, 0xb8, 0x82, 0x2a, 0x2b, + 0xab, 0xe7, 0x7d, 0x5f, 0xe1, 0x3a, 0x77, 0x78, 0x07, 0xc5, 0xeb, 0x8a, 0xde, 0x84, 0x59, 0xdf, + 0x74, 0x4f, 0xf1, 0x33, 0xce, 0x21, 0xb9, 0x24, 0x0e, 0xf1, 0xf9, 0xe0, 0x2e, 0x7e, 0xc6, 0xe8, + 0xe3, 0x0a, 0x4c, 0x85, 0x86, 0xb3, 0x0c, 0x51, 0xa9, 0x06, 0x7b, 0xa2, 0x15, 0x98, 0x25, 0xef, + 0x29, 0x81, 0xe3, 0x0e, 0xca, 0x12, 0xec, 0x98, 0x63, 0x86, 0x34, 0x79, 0xe7, 0x1c, 0x4d, 0xf2, + 0xb6, 0xb7, 0xea, 0x65, 0x7e, 0x58, 0x57, 0xf9, 0x9b, 0x10, 0x1d, 0xc1, 0x8e, 0x69, 0xfd, 0x09, + 0xb7, 0x68, 0x1b, 0x1d, 0xf3, 0x32, 0x14, 0xe8, 0x19, 0x83, 0xd3, 0x28, 0xd2, 0x38, 0x55, 0xf3, + 0x17, 0x4f, 0xab, 0x6e, 0x0a, 0x6f, 0x96, 0x37, 0x21, 0x4f, 0x05, 0x68, 0x11, 0xca, 0xec, 0x54, + 0xc2, 0xec, 0x77, 0x29, 0xbe, 0x79, 0xa5, 0x44, 0x05, 0xdb, 0xfd, 0x2e, 0x92, 0x61, 0xd2, 0xb4, + 0x74, 0x91, 0x99, 0x4d, 0x73, 0x1c, 0x0a, 0xdb, 0x96, 0x8e, 0x5b, 0x4d, 0x85, 0xb6, 0xc9, 0x9b, + 0x50, 0x8b, 0xe0, 0x4a, 0x5e, 0xcc, 0x7a, 0x9a, 0xed, 0x12, 0x95, 0x7b, 0xbc, 0xd2, 0x9e, 0x57, + 0x68, 0x01, 0x66, 0x9b, 0x4a, 0x48, 0x10, 0x37, 0x4c, 0x1d, 0x1f, 0x89, 0x03, 0x48, 0x7a, 0x23, + 0xff, 0x26, 0x03, 0xb3, 0x5c, 
0xd5, 0xc9, 0x5e, 0xae, 0x9e, 0x8f, 0xcf, 0xbc, 0x04, 0xb5, 0xae, + 0x76, 0xa4, 0xd2, 0x03, 0x09, 0x56, 0x2e, 0xe5, 0xd5, 0xd6, 0xa9, 0xae, 0x76, 0xe4, 0x57, 0x47, + 0xe5, 0x5f, 0x65, 0x60, 0x2e, 0xbc, 0x2c, 0xce, 0xa5, 0xaf, 0x01, 0x88, 0x17, 0x79, 0x6f, 0x9e, + 0x33, 0x7c, 0x9e, 0x65, 0x51, 0x81, 0x6e, 0x2a, 0x65, 0xde, 0xa9, 0x95, 0x5c, 0xa1, 0xcd, 0x9e, + 0x46, 0x85, 0x76, 0x8c, 0x72, 0xfd, 0x6f, 0xb3, 0xde, 0x72, 0x4e, 0xf8, 0xea, 0x30, 0xfe, 0xfa, + 0x53, 0xb6, 0x69, 0xf6, 0xb8, 0xdb, 0x34, 0x37, 0xfa, 0x36, 0x9d, 0x4c, 0xdb, 0xa6, 0x77, 0x60, + 0xaa, 0xdf, 0xeb, 0x58, 0x9a, 0xae, 0xda, 0xd8, 0xe9, 0x77, 0x5c, 0x7e, 0x52, 0x25, 0xc7, 0x5d, + 0x88, 0x80, 0xfa, 0xb0, 0xc7, 0x0f, 0x6c, 0xfa, 0x1d, 0x57, 0xa9, 0xf6, 0x03, 0x77, 0xf2, 0x37, + 0xfd, 0xda, 0x7c, 0xac, 0xeb, 0xe0, 0x6d, 0xfa, 0x32, 0x14, 0xe9, 0x99, 0xb2, 0x77, 0xb0, 0x18, + 0xdd, 0xa9, 0x05, 0xd2, 0xdc, 0xd2, 0xd1, 0x35, 0x98, 0x3c, 0xd4, 0x9c, 0x43, 0xfe, 0xc1, 0xd4, + 0x8c, 0x38, 0x4c, 0xa3, 0x8f, 0xdb, 0xd4, 0x9c, 0x43, 0x85, 0x36, 0xcb, 0x7f, 0xcf, 0x42, 0x95, + 0x04, 0x3c, 0x61, 0x02, 0xb4, 0x1a, 0xdd, 0x50, 0x95, 0xd5, 0x73, 0x81, 0xf5, 0xf9, 0xb1, 0x31, + 0xb0, 0xab, 0x22, 0x24, 0x90, 0x4d, 0x27, 0x81, 0x5c, 0x80, 0x04, 0xe2, 0x47, 0xa5, 0xf9, 0x11, + 0x8e, 0x4a, 0xdf, 0x85, 0x73, 0xde, 0x79, 0x61, 0x60, 0x3f, 0x92, 0x97, 0x80, 0x11, 0x36, 0xc7, + 0xac, 0x18, 0xeb, 0xcb, 0x9c, 0x78, 0x38, 0x2d, 0x1e, 0x3b, 0x9c, 0xa6, 0xc4, 0xbf, 0x52, 0x6a, + 0xfc, 0x6b, 0x7a, 0x07, 0x62, 0xe1, 0xb7, 0x55, 0xf4, 0xaf, 0x30, 0xe3, 0xf4, 0xdb, 0x6d, 0xec, + 0x38, 0xfb, 0xfd, 0x8e, 0xca, 0x99, 0x9e, 0x79, 0x43, 0xdd, 0x6f, 0xd8, 0x61, 0x14, 0xff, 0xcb, + 0xac, 0xe7, 0x4f, 0x5b, 0xda, 0x53, 0xcc, 0xa2, 0xc4, 0x3f, 0x39, 0xa7, 0x3e, 0x8f, 0x38, 0x9c, + 0x1a, 0x57, 0xf3, 0xa9, 0x71, 0x95, 0x1d, 0x10, 0xc4, 0xa0, 0xe4, 0xf9, 0xe1, 0x8f, 0xfc, 0x63, + 0xe2, 0xd3, 0x48, 0xec, 0x9f, 0x0b, 0xd2, 0xf2, 0x17, 0xfe, 0x31, 0x72, 0x52, 0x9e, 0xff, 0xd5, + 0x8c, 0x4d, 0x3f, 0xf5, 0x17, 0x75, 0x2a, 0xaf, 0x11, 0xe3, 0xa3, 
0x70, 0x0b, 0x8a, 0x2c, 0x0c, + 0x88, 0xc5, 0xa7, 0xc4, 0x01, 0x0f, 0x6e, 0x12, 0x07, 0xc4, 0x90, 0x58, 0x08, 0x08, 0xf6, 0x7a, + 0xbe, 0x21, 0x60, 0x09, 0x16, 0x13, 0x81, 0xe4, 0x2e, 0xff, 0x79, 0x06, 0x10, 0x6f, 0x3f, 0x51, + 0x65, 0x69, 0x4c, 0x5f, 0x5f, 0x87, 0x1a, 0xab, 0x1d, 0xa9, 0xa3, 0xbb, 0xfc, 0x34, 0x1b, 0xe1, + 0x25, 0xa7, 0x5e, 0x01, 0x29, 0x17, 0x28, 0x20, 0xc9, 0x4f, 0xbc, 0xd4, 0x33, 0x54, 0xf6, 0xb9, + 0x11, 0x2e, 0xfb, 0xc4, 0x1f, 0x33, 0x4a, 0xdd, 0xc7, 0xcf, 0x90, 0xbd, 0xba, 0x4f, 0x70, 0xd3, + 0x66, 0x46, 0xdf, 0xb4, 0x3f, 0xc9, 0x78, 0x9f, 0x26, 0x44, 0x3e, 0x48, 0xf9, 0x2a, 0x40, 0x2f, + 0xff, 0x38, 0xe7, 0x7f, 0x10, 0x11, 0xf9, 0x74, 0xe5, 0xab, 0x49, 0x38, 0xe9, 0xb1, 0x64, 0x32, + 0xfd, 0x1d, 0xed, 0x32, 0x54, 0x13, 0xbe, 0x73, 0xab, 0x38, 0x81, 0xa3, 0xbb, 0x94, 0x30, 0x58, + 0x38, 0x6e, 0x18, 0x2c, 0x26, 0x84, 0xc1, 0x57, 0x61, 0xd2, 0xc4, 0x47, 0xe2, 0x0c, 0x74, 0x80, + 0x15, 0x69, 0x37, 0xf9, 0x23, 0xa8, 0xae, 0x6b, 0x6e, 0xfb, 0xf0, 0xd8, 0xfe, 0xf6, 0x1f, 0x50, + 0xb2, 0x59, 0x83, 0xd8, 0x4d, 0x52, 0xe0, 0x6b, 0xd1, 0x80, 0x6a, 0xba, 0x9d, 0xbc, 0xbe, 0xf2, + 0x1f, 0x01, 0xea, 0xd1, 0x66, 0xd4, 0x84, 0x29, 0x7e, 0xe0, 0xcf, 0x8a, 0x92, 0x7c, 0x13, 0x2d, + 0x45, 0xbf, 0x3f, 0x0d, 0x7d, 0x5d, 0xbe, 0x39, 0xa1, 0x54, 0xf7, 0x02, 0x62, 0x74, 0x13, 0xf8, + 0x37, 0x02, 0xea, 0x01, 0xf6, 0x3f, 0x65, 0x8f, 0xa8, 0xf0, 0xcf, 0x05, 0x36, 0x27, 0x94, 0xf2, + 0x9e, 0x90, 0x05, 0xa6, 0xa0, 0x53, 0x6e, 0xe4, 0x8c, 0x1a, 0x9b, 0x42, 0x28, 0x04, 0xf9, 0x53, + 0x60, 0x62, 0xf4, 0x5f, 0xde, 0x97, 0x0b, 0x1d, 0xc3, 0x71, 0xbd, 0x9a, 0x4f, 0xc2, 0x67, 0xb4, + 0xbe, 0x06, 0x3e, 0x69, 0x22, 0x44, 0xff, 0x07, 0xf3, 0x7c, 0xbc, 0x83, 0x5d, 0x55, 0xf3, 0xbf, + 0x60, 0xe0, 0xe5, 0x9f, 0x6b, 0x51, 0x55, 0x89, 0x1f, 0x5d, 0x6c, 0x4e, 0x28, 0x73, 0x7b, 0x09, + 0xcd, 0x68, 0x0d, 0xaa, 0xbc, 0xae, 0xbe, 0x47, 0x92, 0x04, 0x5e, 0x06, 0xba, 0x10, 0xad, 0xe2, + 0x05, 0x5f, 0xd7, 0x37, 0x27, 0x94, 0x8a, 0xe5, 0x4b, 0x09, 0x4e, 0x5c, 0x45, 0x9b, 0x26, 0xb3, + 0x3c, 
0x91, 0x5e, 0x8a, 0xea, 0x08, 0xbd, 0x4c, 0x12, 0x9c, 0xac, 0x80, 0x98, 0x98, 0x8a, 0x6b, + 0x21, 0xa6, 0x2a, 0x45, 0x4d, 0x15, 0x3d, 0xc2, 0x21, 0xa6, 0xb2, 0x84, 0x8c, 0x80, 0xcc, 0x07, + 0x53, 0x90, 0xcb, 0x51, 0x90, 0x63, 0x47, 0x24, 0x04, 0x64, 0xcb, 0x13, 0xa2, 0x07, 0x30, 0x1b, + 0x44, 0x41, 0x18, 0x1c, 0xa8, 0x1e, 0x39, 0x11, 0x8c, 0xa8, 0xd5, 0x67, 0xac, 0x68, 0x1b, 0x7a, + 0x04, 0x73, 0x5c, 0xeb, 0x3e, 0x0d, 0xb1, 0x42, 0x6d, 0x85, 0xaa, 0xbd, 0x12, 0x55, 0x9b, 0x90, + 0xd0, 0x6c, 0x4e, 0x28, 0xc8, 0x8a, 0x35, 0x12, 0xc4, 0x05, 0xc1, 0x30, 0xab, 0x55, 0xa3, 0x88, + 0x27, 0x54, 0x59, 0x08, 0xe2, 0x4e, 0x40, 0x8c, 0xee, 0xc0, 0xb4, 0xd0, 0xc2, 0x0d, 0xc7, 0x4e, + 0xfb, 0x2f, 0xc6, 0xd4, 0x44, 0x2d, 0x27, 0x9e, 0xce, 0x4d, 0xf7, 0x00, 0x66, 0x85, 0xa2, 0xae, + 0xf6, 0x14, 0x73, 0x9a, 0xa4, 0xe7, 0xfd, 0x49, 0xe9, 0x51, 0xec, 0x5d, 0x85, 0xa0, 0xe7, 0x44, + 0xdb, 0x08, 0x7a, 0xa1, 0x45, 0x0a, 0xf4, 0x6a, 0x51, 0xf4, 0x52, 0x33, 0x73, 0x82, 0x9e, 0x13, + 0x6b, 0x44, 0x4f, 0xe0, 0x9c, 0x50, 0x1c, 0xb6, 0x4b, 0x9d, 0x6a, 0xbe, 0x1a, 0xd3, 0x9c, 0x6c, + 0x18, 0xb1, 0xe6, 0x90, 0x65, 0xd6, 0x7c, 0xea, 0xa7, 0x9e, 0x38, 0x13, 0xdd, 0x4e, 0xf1, 0x9c, + 0x8a, 0x6c, 0x27, 0xc7, 0x97, 0xa2, 0x2d, 0xa8, 0x0b, 0x15, 0x3a, 0x8f, 0xa1, 0x0d, 0x14, 0x3d, + 0xea, 0x4a, 0xce, 0x11, 0x36, 0x27, 0x94, 0x9a, 0x13, 0x6e, 0x59, 0x2f, 0x43, 0x91, 0xb7, 0xca, + 0xef, 0xc0, 0x14, 0xe7, 0x59, 0x1e, 0x92, 0xff, 0x13, 0xca, 0x36, 0xbf, 0x16, 0x94, 0xbd, 0x18, + 0xa3, 0x6c, 0xd6, 0x4e, 0x39, 0xdb, 0xef, 0x2d, 0xff, 0x15, 0x60, 0x26, 0xd6, 0x01, 0x6d, 0x24, + 0xb3, 0xf6, 0xc5, 0x34, 0xd6, 0x66, 0x43, 0x63, 0xb4, 0x7d, 0x2b, 0x81, 0xb6, 0x17, 0x13, 0x69, + 0xdb, 0x53, 0x10, 0xe0, 0xed, 0x8d, 0x64, 0xde, 0xbe, 0x98, 0xc6, 0xdb, 0xd1, 0x49, 0x70, 0x53, + 0xbe, 0x95, 0x44, 0xdc, 0x17, 0x92, 0x89, 0xdb, 0x53, 0x11, 0x64, 0xee, 0xff, 0x1f, 0xc2, 0xdc, + 0x2f, 0x0d, 0x63, 0x6e, 0x4f, 0x6b, 0x32, 0x75, 0xaf, 0x27, 0x52, 0xf7, 0x52, 0x0a, 0x75, 0x7b, + 0xca, 0x42, 0xdc, 0xbd, 0x91, 0xcc, 0xdd, 
0x17, 0xd3, 0xb8, 0xdb, 0xc7, 0x2a, 0x44, 0xde, 0xb7, + 0x12, 0xc8, 0x7b, 0x31, 0x91, 0xbc, 0x7d, 0x83, 0xf9, 0xec, 0xfd, 0x56, 0x12, 0x7b, 0x5f, 0x48, + 0x66, 0x6f, 0x1f, 0xe9, 0x00, 0x7d, 0x3f, 0x1c, 0x44, 0xdf, 0x57, 0x06, 0xd2, 0xb7, 0xa7, 0x2f, + 0x81, 0xbf, 0x1f, 0x0f, 0xe4, 0xef, 0xab, 0x83, 0xf9, 0xdb, 0x53, 0x9c, 0x44, 0xe0, 0x1b, 0xc9, + 0x04, 0x7e, 0x31, 0x8d, 0xc0, 0x7d, 0xd8, 0x43, 0x0c, 0xbe, 0x99, 0xc2, 0xe0, 0xcb, 0xa9, 0x0c, + 0xee, 0x29, 0x8a, 0x50, 0xf8, 0xc3, 0x41, 0x14, 0x7e, 0x65, 0x20, 0x85, 0xfb, 0x08, 0xc6, 0x39, + 0xfc, 0xf1, 0x40, 0x0e, 0xbf, 0x3a, 0x98, 0xc3, 0x7d, 0x04, 0x13, 0x48, 0xfc, 0x7f, 0x07, 0x93, + 0xf8, 0xb5, 0x21, 0x24, 0xee, 0xe9, 0x4e, 0x64, 0xf1, 0xf5, 0x44, 0x16, 0x5f, 0x4a, 0x61, 0x71, + 0x7f, 0x67, 0x05, 0x69, 0x7c, 0x3b, 0x95, 0xc6, 0x2f, 0x0f, 0xa0, 0x71, 0x4f, 0x57, 0x8c, 0xc7, + 0x01, 0x4a, 0xa2, 0x79, 0xf5, 0x2f, 0x33, 0x50, 0xda, 0xe2, 0x3a, 0xd0, 0x16, 0x54, 0x19, 0x6d, + 0xf2, 0xff, 0x70, 0x07, 0xa7, 0xc8, 0xd2, 0x10, 0x2e, 0x46, 0x4d, 0x28, 0xdf, 0xc1, 0x2e, 0xd7, + 0x35, 0x20, 0x57, 0x96, 0x06, 0x11, 0x32, 0x99, 0x14, 0xc3, 0x32, 0x6d, 0x52, 0xa1, 0x68, 0x2a, + 0x0d, 0xe1, 0x66, 0xb4, 0x09, 0x15, 0x02, 0x2a, 0x6b, 0x73, 0xd0, 0xa0, 0xf4, 0x59, 0x1a, 0x48, + 0xd1, 0x08, 0xc3, 0xdc, 0xae, 0x58, 0x5e, 0x90, 0x4c, 0x47, 0x4b, 0xa3, 0xa5, 0x11, 0x39, 0x1b, + 0xbd, 0x03, 0x15, 0xea, 0xad, 0xfc, 0xab, 0xde, 0x81, 0xf9, 0xb4, 0x34, 0x98, 0xb2, 0xa9, 0x81, + 0xe9, 0x2e, 0xe5, 0xca, 0x06, 0x27, 0xd6, 0xd2, 0x10, 0xee, 0xe6, 0x06, 0xe6, 0xba, 0x06, 0x64, + 0xd8, 0xd2, 0x20, 0x02, 0x17, 0x16, 0x61, 0x0d, 0x21, 0x8b, 0xc4, 0x72, 0x6d, 0x69, 0x20, 0x95, + 0xa3, 0xf7, 0x61, 0x26, 0xb0, 0xb1, 0xf9, 0xbc, 0x46, 0xc8, 0xb9, 0xa5, 0x51, 0x88, 0x1d, 0xa9, + 0x80, 0x82, 0x5b, 0x9b, 0xab, 0x1f, 0x25, 0xf7, 0x96, 0x46, 0x22, 0x78, 0x62, 0x1d, 0xfa, 0x5c, + 0x71, 0x9c, 0x3d, 0x38, 0x09, 0x97, 0x86, 0x50, 0x3c, 0xda, 0x81, 0x29, 0x66, 0x2f, 0xa1, 0x6f, + 0x48, 0x36, 0x2e, 0x0d, 0xe3, 0x7a, 0x82, 0xaf, 0xcf, 0xc8, 0x42, 0xeb, 0x08, 
0x59, 0xb9, 0x34, + 0x0a, 0xed, 0x13, 0x7c, 0x03, 0xb0, 0x0b, 0xf5, 0xa3, 0x64, 0xe7, 0xd2, 0x48, 0xf4, 0x8f, 0xf6, + 0x60, 0x36, 0x88, 0xbb, 0x78, 0xc2, 0x48, 0x59, 0xba, 0x34, 0x5a, 0x18, 0x40, 0x77, 0xa1, 0x1a, + 0xfc, 0x9b, 0x02, 0x0d, 0xcc, 0xd7, 0xa5, 0xc1, 0x71, 0x00, 0xbd, 0x07, 0x35, 0x41, 0xda, 0x62, + 0xb2, 0x43, 0x13, 0x77, 0x69, 0x78, 0x4c, 0x40, 0x6f, 0x40, 0x9e, 0x26, 0xdc, 0x68, 0x3e, 0xb9, + 0xaa, 0x22, 0x2d, 0xa4, 0xa4, 0xee, 0xe8, 0x11, 0xd4, 0x19, 0xc9, 0x73, 0xd5, 0xf7, 0x3b, 0x7a, + 0xc2, 0x94, 0x22, 0xff, 0x9b, 0x26, 0x4c, 0x29, 0xf6, 0xff, 0xe4, 0xff, 0x40, 0x3d, 0xe4, 0xac, + 0x44, 0x76, 0x79, 0xb0, 0xbf, 0x12, 0xcd, 0xf2, 0x10, 0x97, 0x25, 0x6a, 0x76, 0x61, 0x3a, 0xf0, + 0x8b, 0x16, 0x91, 0xc4, 0x1d, 0x3d, 0xfc, 0x33, 0x99, 0x74, 0x29, 0xa5, 0x83, 0xaf, 0x54, 0x05, + 0x14, 0x31, 0x0d, 0x91, 0x5e, 0x19, 0x66, 0x1d, 0xa2, 0xfc, 0xea, 0x50, 0x03, 0x71, 0x40, 0x42, + 0x6e, 0x9a, 0x0c, 0x48, 0xf4, 0x5f, 0xb1, 0x04, 0x40, 0xe2, 0xff, 0x6e, 0xbd, 0x07, 0xb5, 0xa0, + 0x8f, 0x46, 0x6c, 0x98, 0xfc, 0x47, 0x55, 0xd0, 0x86, 0x69, 0x3f, 0x18, 0xbd, 0x0f, 0x33, 0xe1, + 0x18, 0x46, 0x84, 0xa1, 0x09, 0x25, 0xff, 0xc3, 0x13, 0xa6, 0x87, 0x94, 0x5f, 0x6b, 0x48, 0x1c, + 0x0c, 0xfc, 0x13, 0x13, 0xdc, 0x58, 0xf1, 0x3f, 0x6e, 0x82, 0x1b, 0x2b, 0xe1, 0x47, 0x9a, 0xf5, + 0xb9, 0x27, 0xf4, 0x8f, 0xe9, 0x0f, 0x56, 0x0c, 0xeb, 0x06, 0x49, 0x77, 0x2d, 0xf3, 0x46, 0x6f, + 0x6f, 0xaf, 0x40, 0xcf, 0x69, 0x5f, 0xff, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x32, 0xf7, + 0x85, 0x57, 0x45, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCMetainfoClient interface { + DRPCConn() drpc.Conn + + // Bucket + CreateBucket(ctx context.Context, in *BucketCreateRequest) (*BucketCreateResponse, error) + GetBucket(ctx context.Context, in *BucketGetRequest) (*BucketGetResponse, error) + DeleteBucket(ctx context.Context, in *BucketDeleteRequest) (*BucketDeleteResponse, error) + ListBuckets(ctx context.Context, in *BucketListRequest) (*BucketListResponse, error) + 
SetBucketAttribution(ctx context.Context, in *BucketSetAttributionRequest) (*BucketSetAttributionResponse, error) + // Object + BeginObject(ctx context.Context, in *ObjectBeginRequest) (*ObjectBeginResponse, error) + CommitObject(ctx context.Context, in *ObjectCommitRequest) (*ObjectCommitResponse, error) + GetObject(ctx context.Context, in *ObjectGetRequest) (*ObjectGetResponse, error) + ListObjects(ctx context.Context, in *ObjectListRequest) (*ObjectListResponse, error) + BeginDeleteObject(ctx context.Context, in *ObjectBeginDeleteRequest) (*ObjectBeginDeleteResponse, error) + FinishDeleteObject(ctx context.Context, in *ObjectFinishDeleteRequest) (*ObjectFinishDeleteResponse, error) + BeginSegment(ctx context.Context, in *SegmentBeginRequest) (*SegmentBeginResponse, error) + CommitSegment(ctx context.Context, in *SegmentCommitRequest) (*SegmentCommitResponse, error) + MakeInlineSegment(ctx context.Context, in *SegmentMakeInlineRequest) (*SegmentMakeInlineResponse, error) + BeginDeleteSegment(ctx context.Context, in *SegmentBeginDeleteRequest) (*SegmentBeginDeleteResponse, error) + FinishDeleteSegment(ctx context.Context, in *SegmentFinishDeleteRequest) (*SegmentFinishDeleteResponse, error) + ListSegments(ctx context.Context, in *SegmentListRequest) (*SegmentListResponse, error) + DownloadSegment(ctx context.Context, in *SegmentDownloadRequest) (*SegmentDownloadResponse, error) + Batch(ctx context.Context, in *BatchRequest) (*BatchResponse, error) + CreateSegmentOld(ctx context.Context, in *SegmentWriteRequestOld) (*SegmentWriteResponseOld, error) + CommitSegmentOld(ctx context.Context, in *SegmentCommitRequestOld) (*SegmentCommitResponseOld, error) + SegmentInfoOld(ctx context.Context, in *SegmentInfoRequestOld) (*SegmentInfoResponseOld, error) + DownloadSegmentOld(ctx context.Context, in *SegmentDownloadRequestOld) (*SegmentDownloadResponseOld, error) + DeleteSegmentOld(ctx context.Context, in *SegmentDeleteRequestOld) (*SegmentDeleteResponseOld, error) + 
ListSegmentsOld(ctx context.Context, in *ListSegmentsRequestOld) (*ListSegmentsResponseOld, error) + SetAttributionOld(ctx context.Context, in *SetAttributionRequestOld) (*SetAttributionResponseOld, error) + ProjectInfo(ctx context.Context, in *ProjectInfoRequest) (*ProjectInfoResponse, error) +} + +type drpcMetainfoClient struct { + cc drpc.Conn +} + +func NewDRPCMetainfoClient(cc drpc.Conn) DRPCMetainfoClient { + return &drpcMetainfoClient{cc} +} + +func (c *drpcMetainfoClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcMetainfoClient) CreateBucket(ctx context.Context, in *BucketCreateRequest) (*BucketCreateResponse, error) { + out := new(BucketCreateResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/CreateBucket", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) GetBucket(ctx context.Context, in *BucketGetRequest) (*BucketGetResponse, error) { + out := new(BucketGetResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/GetBucket", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) DeleteBucket(ctx context.Context, in *BucketDeleteRequest) (*BucketDeleteResponse, error) { + out := new(BucketDeleteResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/DeleteBucket", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) ListBuckets(ctx context.Context, in *BucketListRequest) (*BucketListResponse, error) { + out := new(BucketListResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/ListBuckets", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) SetBucketAttribution(ctx context.Context, in *BucketSetAttributionRequest) (*BucketSetAttributionResponse, error) { + out := new(BucketSetAttributionResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/SetBucketAttribution", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c 
*drpcMetainfoClient) BeginObject(ctx context.Context, in *ObjectBeginRequest) (*ObjectBeginResponse, error) { + out := new(ObjectBeginResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/BeginObject", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) CommitObject(ctx context.Context, in *ObjectCommitRequest) (*ObjectCommitResponse, error) { + out := new(ObjectCommitResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/CommitObject", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) GetObject(ctx context.Context, in *ObjectGetRequest) (*ObjectGetResponse, error) { + out := new(ObjectGetResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/GetObject", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) ListObjects(ctx context.Context, in *ObjectListRequest) (*ObjectListResponse, error) { + out := new(ObjectListResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/ListObjects", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) BeginDeleteObject(ctx context.Context, in *ObjectBeginDeleteRequest) (*ObjectBeginDeleteResponse, error) { + out := new(ObjectBeginDeleteResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/BeginDeleteObject", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) FinishDeleteObject(ctx context.Context, in *ObjectFinishDeleteRequest) (*ObjectFinishDeleteResponse, error) { + out := new(ObjectFinishDeleteResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/FinishDeleteObject", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) BeginSegment(ctx context.Context, in *SegmentBeginRequest) (*SegmentBeginResponse, error) { + out := new(SegmentBeginResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/BeginSegment", in, out) + if err != 
nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) CommitSegment(ctx context.Context, in *SegmentCommitRequest) (*SegmentCommitResponse, error) { + out := new(SegmentCommitResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/CommitSegment", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) MakeInlineSegment(ctx context.Context, in *SegmentMakeInlineRequest) (*SegmentMakeInlineResponse, error) { + out := new(SegmentMakeInlineResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/MakeInlineSegment", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) BeginDeleteSegment(ctx context.Context, in *SegmentBeginDeleteRequest) (*SegmentBeginDeleteResponse, error) { + out := new(SegmentBeginDeleteResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/BeginDeleteSegment", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) FinishDeleteSegment(ctx context.Context, in *SegmentFinishDeleteRequest) (*SegmentFinishDeleteResponse, error) { + out := new(SegmentFinishDeleteResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/FinishDeleteSegment", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) ListSegments(ctx context.Context, in *SegmentListRequest) (*SegmentListResponse, error) { + out := new(SegmentListResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/ListSegments", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) DownloadSegment(ctx context.Context, in *SegmentDownloadRequest) (*SegmentDownloadResponse, error) { + out := new(SegmentDownloadResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/DownloadSegment", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) Batch(ctx context.Context, in *BatchRequest) (*BatchResponse, error) { 
+ out := new(BatchResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/Batch", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) CreateSegmentOld(ctx context.Context, in *SegmentWriteRequestOld) (*SegmentWriteResponseOld, error) { + out := new(SegmentWriteResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/CreateSegmentOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) CommitSegmentOld(ctx context.Context, in *SegmentCommitRequestOld) (*SegmentCommitResponseOld, error) { + out := new(SegmentCommitResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/CommitSegmentOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) SegmentInfoOld(ctx context.Context, in *SegmentInfoRequestOld) (*SegmentInfoResponseOld, error) { + out := new(SegmentInfoResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/SegmentInfoOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) DownloadSegmentOld(ctx context.Context, in *SegmentDownloadRequestOld) (*SegmentDownloadResponseOld, error) { + out := new(SegmentDownloadResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/DownloadSegmentOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) DeleteSegmentOld(ctx context.Context, in *SegmentDeleteRequestOld) (*SegmentDeleteResponseOld, error) { + out := new(SegmentDeleteResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/DeleteSegmentOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) ListSegmentsOld(ctx context.Context, in *ListSegmentsRequestOld) (*ListSegmentsResponseOld, error) { + out := new(ListSegmentsResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/ListSegmentsOld", in, out) + if err != nil { + return nil, err + } + return out, 
nil +} + +func (c *drpcMetainfoClient) SetAttributionOld(ctx context.Context, in *SetAttributionRequestOld) (*SetAttributionResponseOld, error) { + out := new(SetAttributionResponseOld) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/SetAttributionOld", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcMetainfoClient) ProjectInfo(ctx context.Context, in *ProjectInfoRequest) (*ProjectInfoResponse, error) { + out := new(ProjectInfoResponse) + err := c.cc.Invoke(ctx, "/metainfo.Metainfo/ProjectInfo", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCMetainfoServer interface { + // Bucket + CreateBucket(context.Context, *BucketCreateRequest) (*BucketCreateResponse, error) + GetBucket(context.Context, *BucketGetRequest) (*BucketGetResponse, error) + DeleteBucket(context.Context, *BucketDeleteRequest) (*BucketDeleteResponse, error) + ListBuckets(context.Context, *BucketListRequest) (*BucketListResponse, error) + SetBucketAttribution(context.Context, *BucketSetAttributionRequest) (*BucketSetAttributionResponse, error) + // Object + BeginObject(context.Context, *ObjectBeginRequest) (*ObjectBeginResponse, error) + CommitObject(context.Context, *ObjectCommitRequest) (*ObjectCommitResponse, error) + GetObject(context.Context, *ObjectGetRequest) (*ObjectGetResponse, error) + ListObjects(context.Context, *ObjectListRequest) (*ObjectListResponse, error) + BeginDeleteObject(context.Context, *ObjectBeginDeleteRequest) (*ObjectBeginDeleteResponse, error) + FinishDeleteObject(context.Context, *ObjectFinishDeleteRequest) (*ObjectFinishDeleteResponse, error) + BeginSegment(context.Context, *SegmentBeginRequest) (*SegmentBeginResponse, error) + CommitSegment(context.Context, *SegmentCommitRequest) (*SegmentCommitResponse, error) + MakeInlineSegment(context.Context, *SegmentMakeInlineRequest) (*SegmentMakeInlineResponse, error) + BeginDeleteSegment(context.Context, *SegmentBeginDeleteRequest) (*SegmentBeginDeleteResponse, 
error) + FinishDeleteSegment(context.Context, *SegmentFinishDeleteRequest) (*SegmentFinishDeleteResponse, error) + ListSegments(context.Context, *SegmentListRequest) (*SegmentListResponse, error) + DownloadSegment(context.Context, *SegmentDownloadRequest) (*SegmentDownloadResponse, error) + Batch(context.Context, *BatchRequest) (*BatchResponse, error) + CreateSegmentOld(context.Context, *SegmentWriteRequestOld) (*SegmentWriteResponseOld, error) + CommitSegmentOld(context.Context, *SegmentCommitRequestOld) (*SegmentCommitResponseOld, error) + SegmentInfoOld(context.Context, *SegmentInfoRequestOld) (*SegmentInfoResponseOld, error) + DownloadSegmentOld(context.Context, *SegmentDownloadRequestOld) (*SegmentDownloadResponseOld, error) + DeleteSegmentOld(context.Context, *SegmentDeleteRequestOld) (*SegmentDeleteResponseOld, error) + ListSegmentsOld(context.Context, *ListSegmentsRequestOld) (*ListSegmentsResponseOld, error) + SetAttributionOld(context.Context, *SetAttributionRequestOld) (*SetAttributionResponseOld, error) + ProjectInfo(context.Context, *ProjectInfoRequest) (*ProjectInfoResponse, error) +} + +type DRPCMetainfoDescription struct{} + +func (DRPCMetainfoDescription) NumMethods() int { return 27 } + +func (DRPCMetainfoDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/metainfo.Metainfo/CreateBucket", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + CreateBucket( + ctx, + in1.(*BucketCreateRequest), + ) + }, DRPCMetainfoServer.CreateBucket, true + case 1: + return "/metainfo.Metainfo/GetBucket", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). 
+ GetBucket( + ctx, + in1.(*BucketGetRequest), + ) + }, DRPCMetainfoServer.GetBucket, true + case 2: + return "/metainfo.Metainfo/DeleteBucket", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + DeleteBucket( + ctx, + in1.(*BucketDeleteRequest), + ) + }, DRPCMetainfoServer.DeleteBucket, true + case 3: + return "/metainfo.Metainfo/ListBuckets", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + ListBuckets( + ctx, + in1.(*BucketListRequest), + ) + }, DRPCMetainfoServer.ListBuckets, true + case 4: + return "/metainfo.Metainfo/SetBucketAttribution", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + SetBucketAttribution( + ctx, + in1.(*BucketSetAttributionRequest), + ) + }, DRPCMetainfoServer.SetBucketAttribution, true + case 5: + return "/metainfo.Metainfo/BeginObject", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + BeginObject( + ctx, + in1.(*ObjectBeginRequest), + ) + }, DRPCMetainfoServer.BeginObject, true + case 6: + return "/metainfo.Metainfo/CommitObject", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + CommitObject( + ctx, + in1.(*ObjectCommitRequest), + ) + }, DRPCMetainfoServer.CommitObject, true + case 7: + return "/metainfo.Metainfo/GetObject", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + GetObject( + ctx, + in1.(*ObjectGetRequest), + ) + }, DRPCMetainfoServer.GetObject, true + case 8: + return "/metainfo.Metainfo/ListObjects", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). 
+ ListObjects( + ctx, + in1.(*ObjectListRequest), + ) + }, DRPCMetainfoServer.ListObjects, true + case 9: + return "/metainfo.Metainfo/BeginDeleteObject", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + BeginDeleteObject( + ctx, + in1.(*ObjectBeginDeleteRequest), + ) + }, DRPCMetainfoServer.BeginDeleteObject, true + case 10: + return "/metainfo.Metainfo/FinishDeleteObject", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + FinishDeleteObject( + ctx, + in1.(*ObjectFinishDeleteRequest), + ) + }, DRPCMetainfoServer.FinishDeleteObject, true + case 11: + return "/metainfo.Metainfo/BeginSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + BeginSegment( + ctx, + in1.(*SegmentBeginRequest), + ) + }, DRPCMetainfoServer.BeginSegment, true + case 12: + return "/metainfo.Metainfo/CommitSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + CommitSegment( + ctx, + in1.(*SegmentCommitRequest), + ) + }, DRPCMetainfoServer.CommitSegment, true + case 13: + return "/metainfo.Metainfo/MakeInlineSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + MakeInlineSegment( + ctx, + in1.(*SegmentMakeInlineRequest), + ) + }, DRPCMetainfoServer.MakeInlineSegment, true + case 14: + return "/metainfo.Metainfo/BeginDeleteSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). 
+ BeginDeleteSegment( + ctx, + in1.(*SegmentBeginDeleteRequest), + ) + }, DRPCMetainfoServer.BeginDeleteSegment, true + case 15: + return "/metainfo.Metainfo/FinishDeleteSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + FinishDeleteSegment( + ctx, + in1.(*SegmentFinishDeleteRequest), + ) + }, DRPCMetainfoServer.FinishDeleteSegment, true + case 16: + return "/metainfo.Metainfo/ListSegments", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + ListSegments( + ctx, + in1.(*SegmentListRequest), + ) + }, DRPCMetainfoServer.ListSegments, true + case 17: + return "/metainfo.Metainfo/DownloadSegment", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + DownloadSegment( + ctx, + in1.(*SegmentDownloadRequest), + ) + }, DRPCMetainfoServer.DownloadSegment, true + case 18: + return "/metainfo.Metainfo/Batch", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + Batch( + ctx, + in1.(*BatchRequest), + ) + }, DRPCMetainfoServer.Batch, true + case 19: + return "/metainfo.Metainfo/CreateSegmentOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + CreateSegmentOld( + ctx, + in1.(*SegmentWriteRequestOld), + ) + }, DRPCMetainfoServer.CreateSegmentOld, true + case 20: + return "/metainfo.Metainfo/CommitSegmentOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). 
+ CommitSegmentOld( + ctx, + in1.(*SegmentCommitRequestOld), + ) + }, DRPCMetainfoServer.CommitSegmentOld, true + case 21: + return "/metainfo.Metainfo/SegmentInfoOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + SegmentInfoOld( + ctx, + in1.(*SegmentInfoRequestOld), + ) + }, DRPCMetainfoServer.SegmentInfoOld, true + case 22: + return "/metainfo.Metainfo/DownloadSegmentOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + DownloadSegmentOld( + ctx, + in1.(*SegmentDownloadRequestOld), + ) + }, DRPCMetainfoServer.DownloadSegmentOld, true + case 23: + return "/metainfo.Metainfo/DeleteSegmentOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + DeleteSegmentOld( + ctx, + in1.(*SegmentDeleteRequestOld), + ) + }, DRPCMetainfoServer.DeleteSegmentOld, true + case 24: + return "/metainfo.Metainfo/ListSegmentsOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + ListSegmentsOld( + ctx, + in1.(*ListSegmentsRequestOld), + ) + }, DRPCMetainfoServer.ListSegmentsOld, true + case 25: + return "/metainfo.Metainfo/SetAttributionOld", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). + SetAttributionOld( + ctx, + in1.(*SetAttributionRequestOld), + ) + }, DRPCMetainfoServer.SetAttributionOld, true + case 26: + return "/metainfo.Metainfo/ProjectInfo", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCMetainfoServer). 
+ ProjectInfo( + ctx, + in1.(*ProjectInfoRequest), + ) + }, DRPCMetainfoServer.ProjectInfo, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterMetainfo(mux drpc.Mux, impl DRPCMetainfoServer) error { + return mux.Register(impl, DRPCMetainfoDescription{}) +} + +type DRPCMetainfo_CreateBucketStream interface { + drpc.Stream + SendAndClose(*BucketCreateResponse) error +} + +type drpcMetainfoCreateBucketStream struct { + drpc.Stream +} + +func (x *drpcMetainfoCreateBucketStream) SendAndClose(m *BucketCreateResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_GetBucketStream interface { + drpc.Stream + SendAndClose(*BucketGetResponse) error +} + +type drpcMetainfoGetBucketStream struct { + drpc.Stream +} + +func (x *drpcMetainfoGetBucketStream) SendAndClose(m *BucketGetResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_DeleteBucketStream interface { + drpc.Stream + SendAndClose(*BucketDeleteResponse) error +} + +type drpcMetainfoDeleteBucketStream struct { + drpc.Stream +} + +func (x *drpcMetainfoDeleteBucketStream) SendAndClose(m *BucketDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_ListBucketsStream interface { + drpc.Stream + SendAndClose(*BucketListResponse) error +} + +type drpcMetainfoListBucketsStream struct { + drpc.Stream +} + +func (x *drpcMetainfoListBucketsStream) SendAndClose(m *BucketListResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_SetBucketAttributionStream interface { + drpc.Stream + SendAndClose(*BucketSetAttributionResponse) error +} + +type drpcMetainfoSetBucketAttributionStream struct { + drpc.Stream +} + +func (x *drpcMetainfoSetBucketAttributionStream) SendAndClose(m *BucketSetAttributionResponse) error { + if err := x.MsgSend(m); err != 
nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_BeginObjectStream interface { + drpc.Stream + SendAndClose(*ObjectBeginResponse) error +} + +type drpcMetainfoBeginObjectStream struct { + drpc.Stream +} + +func (x *drpcMetainfoBeginObjectStream) SendAndClose(m *ObjectBeginResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_CommitObjectStream interface { + drpc.Stream + SendAndClose(*ObjectCommitResponse) error +} + +type drpcMetainfoCommitObjectStream struct { + drpc.Stream +} + +func (x *drpcMetainfoCommitObjectStream) SendAndClose(m *ObjectCommitResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_GetObjectStream interface { + drpc.Stream + SendAndClose(*ObjectGetResponse) error +} + +type drpcMetainfoGetObjectStream struct { + drpc.Stream +} + +func (x *drpcMetainfoGetObjectStream) SendAndClose(m *ObjectGetResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_ListObjectsStream interface { + drpc.Stream + SendAndClose(*ObjectListResponse) error +} + +type drpcMetainfoListObjectsStream struct { + drpc.Stream +} + +func (x *drpcMetainfoListObjectsStream) SendAndClose(m *ObjectListResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_BeginDeleteObjectStream interface { + drpc.Stream + SendAndClose(*ObjectBeginDeleteResponse) error +} + +type drpcMetainfoBeginDeleteObjectStream struct { + drpc.Stream +} + +func (x *drpcMetainfoBeginDeleteObjectStream) SendAndClose(m *ObjectBeginDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_FinishDeleteObjectStream interface { + drpc.Stream + SendAndClose(*ObjectFinishDeleteResponse) error +} + +type drpcMetainfoFinishDeleteObjectStream struct { + drpc.Stream +} + 
+func (x *drpcMetainfoFinishDeleteObjectStream) SendAndClose(m *ObjectFinishDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_BeginSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentBeginResponse) error +} + +type drpcMetainfoBeginSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoBeginSegmentStream) SendAndClose(m *SegmentBeginResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_CommitSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentCommitResponse) error +} + +type drpcMetainfoCommitSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoCommitSegmentStream) SendAndClose(m *SegmentCommitResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_MakeInlineSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentMakeInlineResponse) error +} + +type drpcMetainfoMakeInlineSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoMakeInlineSegmentStream) SendAndClose(m *SegmentMakeInlineResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_BeginDeleteSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentBeginDeleteResponse) error +} + +type drpcMetainfoBeginDeleteSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoBeginDeleteSegmentStream) SendAndClose(m *SegmentBeginDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_FinishDeleteSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentFinishDeleteResponse) error +} + +type drpcMetainfoFinishDeleteSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoFinishDeleteSegmentStream) SendAndClose(m *SegmentFinishDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + 
} + return x.CloseSend() +} + +type DRPCMetainfo_ListSegmentsStream interface { + drpc.Stream + SendAndClose(*SegmentListResponse) error +} + +type drpcMetainfoListSegmentsStream struct { + drpc.Stream +} + +func (x *drpcMetainfoListSegmentsStream) SendAndClose(m *SegmentListResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_DownloadSegmentStream interface { + drpc.Stream + SendAndClose(*SegmentDownloadResponse) error +} + +type drpcMetainfoDownloadSegmentStream struct { + drpc.Stream +} + +func (x *drpcMetainfoDownloadSegmentStream) SendAndClose(m *SegmentDownloadResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_BatchStream interface { + drpc.Stream + SendAndClose(*BatchResponse) error +} + +type drpcMetainfoBatchStream struct { + drpc.Stream +} + +func (x *drpcMetainfoBatchStream) SendAndClose(m *BatchResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_CreateSegmentOldStream interface { + drpc.Stream + SendAndClose(*SegmentWriteResponseOld) error +} + +type drpcMetainfoCreateSegmentOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoCreateSegmentOldStream) SendAndClose(m *SegmentWriteResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_CommitSegmentOldStream interface { + drpc.Stream + SendAndClose(*SegmentCommitResponseOld) error +} + +type drpcMetainfoCommitSegmentOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoCommitSegmentOldStream) SendAndClose(m *SegmentCommitResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_SegmentInfoOldStream interface { + drpc.Stream + SendAndClose(*SegmentInfoResponseOld) error +} + +type drpcMetainfoSegmentInfoOldStream struct { + drpc.Stream +} + +func (x 
*drpcMetainfoSegmentInfoOldStream) SendAndClose(m *SegmentInfoResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_DownloadSegmentOldStream interface { + drpc.Stream + SendAndClose(*SegmentDownloadResponseOld) error +} + +type drpcMetainfoDownloadSegmentOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoDownloadSegmentOldStream) SendAndClose(m *SegmentDownloadResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_DeleteSegmentOldStream interface { + drpc.Stream + SendAndClose(*SegmentDeleteResponseOld) error +} + +type drpcMetainfoDeleteSegmentOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoDeleteSegmentOldStream) SendAndClose(m *SegmentDeleteResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_ListSegmentsOldStream interface { + drpc.Stream + SendAndClose(*ListSegmentsResponseOld) error +} + +type drpcMetainfoListSegmentsOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoListSegmentsOldStream) SendAndClose(m *ListSegmentsResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_SetAttributionOldStream interface { + drpc.Stream + SendAndClose(*SetAttributionResponseOld) error +} + +type drpcMetainfoSetAttributionOldStream struct { + drpc.Stream +} + +func (x *drpcMetainfoSetAttributionOldStream) SendAndClose(m *SetAttributionResponseOld) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCMetainfo_ProjectInfoStream interface { + drpc.Stream + SendAndClose(*ProjectInfoResponse) error +} + +type drpcMetainfoProjectInfoStream struct { + drpc.Stream +} + +func (x *drpcMetainfoProjectInfoStream) SendAndClose(m *ProjectInfoResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} 
+ +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/metainfo.proto b/vendor/storj.io/common/pb/metainfo.proto new file mode 100644 index 000000000..366a261c1 --- /dev/null +++ b/vendor/storj.io/common/pb/metainfo.proto @@ -0,0 +1,632 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package metainfo; + +import "encryption.proto"; +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "node.proto"; +import "pointerdb.proto"; +import "orders.proto"; + +// NOTE!!! +// * If someone will add stream_id or segment_id to existing or new message in this protobuf +// then Batch method (satellite/metainfo/batch.go) needs to be updated as well + +// Metainfo it's a satellite RPC service +service Metainfo { + // Bucket + rpc CreateBucket(BucketCreateRequest) returns (BucketCreateResponse); + rpc GetBucket(BucketGetRequest) returns (BucketGetResponse); + rpc DeleteBucket(BucketDeleteRequest) returns (BucketDeleteResponse); + rpc ListBuckets(BucketListRequest) returns (BucketListResponse); + rpc SetBucketAttribution(BucketSetAttributionRequest) returns (BucketSetAttributionResponse); + // Object + rpc BeginObject(ObjectBeginRequest) returns (ObjectBeginResponse); + rpc CommitObject(ObjectCommitRequest) returns (ObjectCommitResponse); + rpc GetObject(ObjectGetRequest) returns (ObjectGetResponse); + rpc ListObjects(ObjectListRequest) returns (ObjectListResponse); + rpc BeginDeleteObject(ObjectBeginDeleteRequest) returns (ObjectBeginDeleteResponse); + rpc FinishDeleteObject(ObjectFinishDeleteRequest) returns (ObjectFinishDeleteResponse); + + rpc BeginSegment(SegmentBeginRequest) returns (SegmentBeginResponse); + rpc CommitSegment(SegmentCommitRequest) returns (SegmentCommitResponse); + rpc MakeInlineSegment(SegmentMakeInlineRequest) returns (SegmentMakeInlineResponse); + rpc BeginDeleteSegment(SegmentBeginDeleteRequest) returns 
(SegmentBeginDeleteResponse); + rpc FinishDeleteSegment(SegmentFinishDeleteRequest) returns (SegmentFinishDeleteResponse); + rpc ListSegments(SegmentListRequest) returns (SegmentListResponse); + rpc DownloadSegment(SegmentDownloadRequest) returns (SegmentDownloadResponse); + + rpc Batch(BatchRequest) returns (BatchResponse); + + rpc CreateSegmentOld(SegmentWriteRequestOld) returns (SegmentWriteResponseOld); + rpc CommitSegmentOld(SegmentCommitRequestOld) returns (SegmentCommitResponseOld); + rpc SegmentInfoOld(SegmentInfoRequestOld) returns (SegmentInfoResponseOld); + rpc DownloadSegmentOld(SegmentDownloadRequestOld) returns (SegmentDownloadResponseOld); + rpc DeleteSegmentOld(SegmentDeleteRequestOld) returns (SegmentDeleteResponseOld); + rpc ListSegmentsOld(ListSegmentsRequestOld) returns (ListSegmentsResponseOld); + rpc SetAttributionOld(SetAttributionRequestOld) returns (SetAttributionResponseOld); + + rpc ProjectInfo(ProjectInfoRequest) returns (ProjectInfoResponse); +} + +message RequestHeader { + bytes api_key = 1; + bytes user_agent = 2; +} + +message Bucket { + bytes name = 1; + encryption.CipherSuite path_cipher = 2; + + google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + int64 default_segment_size = 4; + pointerdb.RedundancyScheme default_redundancy_scheme = 5; + encryption.EncryptionParameters default_encryption_parameters = 6; + bytes partner_id = 7; +} + +message BucketListItem { + bytes name = 1; + + google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message BucketCreateRequest { + RequestHeader header = 15; + + bytes name = 1; + encryption.CipherSuite path_cipher = 2; + + int64 default_segment_size = 3; + pointerdb.RedundancyScheme default_redundancy_scheme = 4; + encryption.EncryptionParameters default_encryption_parameters = 5; + bytes partner_id = 6; +} + +message BucketCreateResponse { + Bucket bucket = 1; +} + +message BucketGetRequest { 
+ RequestHeader header = 15; + + bytes name = 1; +} + +message BucketGetResponse { + Bucket bucket = 1; +} + +message BucketDeleteRequest { + RequestHeader header = 15; + + bytes name = 1; +} + +message BucketDeleteResponse { + Bucket bucket = 1; +} + +message BucketListRequest { + RequestHeader header = 15; + + bytes cursor = 1; + int32 limit = 2; + int32 direction = 3; +} + +message BucketListResponse { + repeated BucketListItem items = 1; + bool more = 2; +} + +message BucketSetAttributionRequest { + RequestHeader header = 15; + + bytes name = 1; + bytes partner_id = 2; +} + +message BucketSetAttributionResponse { +} + +message AddressedOrderLimit { + orders.OrderLimit limit = 1; + node.NodeAddress storage_node_address = 2; +} + +message SegmentWriteRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes path = 2; + int64 segment = 3; + pointerdb.RedundancyScheme redundancy = 4; + int64 max_encrypted_segment_size = 5; + google.protobuf.Timestamp expiration = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message SegmentWriteResponseOld { + repeated AddressedOrderLimit addressed_limits = 1; + bytes root_piece_id = 2 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; +} + +message SegmentCommitRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes path = 2; + int64 segment = 3; + pointerdb.Pointer pointer = 4; + repeated orders.OrderLimit original_limits = 5; +} + +message SegmentCommitResponseOld { + pointerdb.Pointer pointer = 1; +} + +message SegmentDownloadRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes path = 2; + int64 segment = 3; +} + +message SegmentDownloadResponseOld { + repeated AddressedOrderLimit addressed_limits = 1; + pointerdb.Pointer pointer = 2; + bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; +} + 
+message SegmentInfoRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes path = 2; + int64 segment = 3; +} + +message SegmentInfoResponseOld { + pointerdb.Pointer pointer = 2; +} + +message SegmentDeleteRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes path = 2; + int64 segment = 3; +} + +message SegmentDeleteResponseOld { + repeated AddressedOrderLimit addressed_limits = 1; + bytes private_key = 2 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; +} + +message ListSegmentsRequestOld { + RequestHeader header = 15; + + bytes bucket = 1; + bytes prefix = 2; + bytes start_after = 3; + bytes end_before = 4 [deprecated=true]; + bool recursive = 5; + int32 limit = 6; + fixed32 meta_flags = 7; +} + +message ListSegmentsResponseOld { + message Item { + bytes path = 1; + pointerdb.Pointer pointer = 2; + bool is_prefix = 3; + } + + repeated Item items = 1; + bool more = 2; +} + +message SetAttributionRequestOld { + RequestHeader header = 15; + + bytes bucket_name = 1; + bytes partner_id = 2 ; +} + +message SetAttributionResponseOld { +} + +message ProjectInfoRequest { + RequestHeader header = 15; +} + +message ProjectInfoResponse { + bytes project_salt = 1; +} + +//--------------------------- +// Object +//--------------------------- + +message Object { + enum Status { + INVALID = 0; + UPLOADING = 1; + COMMITTING = 2; + COMMITTED = 3; + DELETING = 4; + } + + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; + Status status = 4; + + bytes stream_id = 5 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + + google.protobuf.Timestamp created_at = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp status_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp expires_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes encrypted_metadata_nonce = 9 [(gogoproto.customtype) = 
"Nonce", (gogoproto.nullable) = false]; + bytes encrypted_metadata = 10; + + int64 fixed_segment_size = 11; + pointerdb.RedundancyScheme redundancy_scheme = 12; + encryption.EncryptionParameters encryption_parameters = 13; + + int64 total_size = 14; // total size of object + int64 inline_size = 15; // size of inline part of object + int64 remote_size = 16; // size of remote part of object +} + +message ObjectBeginRequest { + RequestHeader header = 15; + + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; + + google.protobuf.Timestamp expires_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + pointerdb.RedundancyScheme redundancy_scheme = 7; // can be zero + encryption.EncryptionParameters encryption_parameters = 8; // can be zero +} + +message ObjectBeginResponse { + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; + + bytes stream_id = 4 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + + pointerdb.RedundancyScheme redundancy_scheme = 5; + encryption.EncryptionParameters encryption_parameters = 6; +} + +message ObjectCommitRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + + bytes encrypted_metadata_nonce = 2 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_metadata = 3; // TODO: set maximum size limit +} + +message ObjectCommitResponse { +} + +message ObjectGetRequest { + RequestHeader header = 15; + + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; +} + +message ObjectGetResponse { + Object object = 1; +} + +message ObjectListRequest { + RequestHeader header = 15; + + bytes bucket = 1; + bytes encrypted_prefix = 2; + bytes encrypted_cursor = 3; + bool recursive = 4; + int32 limit = 5; + + ObjectListItemIncludes object_includes = 6; +} + +message ObjectListResponse { + repeated ObjectListItem items = 1; + bool more = 2; +} + +message ObjectListItem { + 
bytes encrypted_path = 1; + int32 version = 2; + Object.Status status = 3; + + google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp status_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp expires_at = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes encrypted_metadata_nonce = 7 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_metadata = 8; +} + +message ObjectListItemIncludes { + bool metadata = 1; +} + +message ObjectBeginDeleteRequest { + RequestHeader header = 15; + + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; +} + +message ObjectBeginDeleteResponse { + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + Object object = 2; +} + +message ObjectFinishDeleteRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; +} + +message ObjectFinishDeleteResponse { +} + +// only for satellite use +message SatStreamID { + bytes bucket = 1; + bytes encrypted_path = 2; + int32 version = 3; + + pointerdb.RedundancyScheme redundancy = 4; + + google.protobuf.Timestamp creation_date = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp expiration_date = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes satellite_signature = 9; +} + +//--------------------------- +// Segment +//--------------------------- + +message Segment { + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition position = 2; + + bytes encrypted_key_nonce = 3 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_key = 4; + + int64 size_encrypted_data = 5; // refers to segment size not piece size + + bytes encrypted_inline_data = 6; + repeated Piece pieces = 7; 
+} + +message Piece { + int32 piece_num = 1; + bytes node = 2[(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; +} + +message SegmentPosition { + int32 part_number = 1; + int32 index = 2; +} + +message SegmentBeginRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition position = 2; + + int64 max_order_limit = 3; +} + +message SegmentBeginResponse { + bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false]; + repeated AddressedOrderLimit addressed_limits = 2; + bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; +} + +message SegmentCommitRequest { + RequestHeader header = 15; + + bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false]; + + bytes encrypted_key_nonce = 2 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_key = 3; + + int64 size_encrypted_data = 4; // refers to segment size not piece size + + repeated SegmentPieceUploadResult upload_result = 5; +} + +message SegmentPieceUploadResult { + int32 piece_num = 1; + bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + orders.PieceHash hash = 3; +} + +// only for satellite use +message SatSegmentID { + SatStreamID stream_id = 1; + int32 part_number = 2; + int32 index = 3; + + // TODO we have redundancy in SatStreamID, do we need it here? 
+ // pointerdb.RedundancyScheme redundancy = 4; + bytes root_piece_id = 5 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + repeated AddressedOrderLimit original_order_limits = 6; + google.protobuf.Timestamp creation_date = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes satellite_signature = 8; +} + +message SegmentCommitResponse { + int32 successful_pieces = 1; +} + +message SegmentMakeInlineRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition position = 2; + + bytes encrypted_key_nonce = 3 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_key = 4; + + bytes encrypted_inline_data = 5; +} + +message SegmentMakeInlineResponse {} + +message SegmentBeginDeleteRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition position = 2; +} + +message SegmentBeginDeleteResponse { + bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false]; + repeated AddressedOrderLimit addressed_limits = 2; + bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; +} + +message SegmentFinishDeleteRequest { + RequestHeader header = 15; + + bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false]; + repeated SegmentPieceDeleteResult results = 2; +} + +message SegmentPieceDeleteResult { + int32 piece_num = 1; + bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + orders.PieceHash hash = 3; +} + +message SegmentFinishDeleteResponse {} + +message SegmentListRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition cursor_position = 2; + int32 limit = 3; +} + +message SegmentListResponse { + repeated 
SegmentListItem items = 1; + bool more = 2; +} + +message SegmentListItem { + SegmentPosition position = 1; +} + +message SegmentDownloadRequest { + RequestHeader header = 15; + + bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false]; + SegmentPosition cursor_position = 2; +} + +message SegmentDownloadResponse { + bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false]; + + repeated AddressedOrderLimit addressed_limits = 2; + bytes private_key = 3 [(gogoproto.customtype) = "PiecePrivateKey", (gogoproto.nullable) = false]; + + bytes encrypted_inline_data = 4; + int64 segment_size = 5; + bytes encrypted_key_nonce = 6 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false]; + bytes encrypted_key = 7; + + SegmentPosition next = 8; // can be nil +} + +message BatchRequest { + RequestHeader header = 15; // the only header that matters in a batch. + + // headers for specific BatchRequestItems are ignored entirely + repeated BatchRequestItem requests = 1; +} + +message BatchRequestItem { + oneof Request { + BucketCreateRequest bucket_create = 1; + BucketGetRequest bucket_get = 2; + BucketDeleteRequest bucket_delete = 3; + BucketListRequest bucket_list = 4; + BucketSetAttributionRequest bucket_set_attribution = 5; + + ObjectBeginRequest object_begin = 6; + ObjectCommitRequest object_commit = 7; + ObjectGetRequest object_get = 8; + ObjectListRequest object_list = 9; + ObjectBeginDeleteRequest object_begin_delete = 10; + ObjectFinishDeleteRequest object_finish_delete = 11; + + SegmentBeginRequest segment_begin = 12; + SegmentCommitRequest segment_commit = 13; + SegmentMakeInlineRequest segment_make_inline = 14; + + SegmentBeginDeleteRequest segment_begin_delete = 15; + SegmentFinishDeleteRequest segment_finish_delete = 16; + + SegmentListRequest segment_list = 17; + SegmentDownloadRequest segment_download = 18; + } +} + +message BatchResponse { + repeated BatchResponseItem responses = 1; +} + 
+message BatchResponseItem { + oneof Response { + BucketCreateResponse bucket_create = 1; + BucketGetResponse bucket_get = 2; + BucketDeleteResponse bucket_delete = 3; + BucketListResponse bucket_list = 4; + BucketSetAttributionResponse bucket_set_attribution = 5; + + ObjectBeginResponse object_begin = 6; + ObjectCommitResponse object_commit = 7; + ObjectGetResponse object_get = 8; + ObjectListResponse object_list = 9; + ObjectBeginDeleteResponse object_begin_delete = 10; + ObjectFinishDeleteResponse object_finish_delete = 11; + + SegmentBeginResponse segment_begin = 12; + SegmentCommitResponse segment_commit = 13; + SegmentMakeInlineResponse segment_make_inline = 14; + + SegmentBeginDeleteResponse segment_begin_delete = 15; + SegmentFinishDeleteResponse segment_finish_delete = 16; + + SegmentListResponse segment_list = 17; + SegmentDownloadResponse segment_download = 18; + } +} diff --git a/vendor/storj.io/common/pb/node.pb.go b/vendor/storj.io/common/pb/node.pb.go new file mode 100644 index 000000000..0d4edd6d8 --- /dev/null +++ b/vendor/storj.io/common/pb/node.pb.go @@ -0,0 +1,489 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: node.proto + +package pb + +import ( + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// NodeType is an enum of possible node types +type NodeType int32 + +const ( + NodeType_INVALID NodeType = 0 + NodeType_SATELLITE NodeType = 1 + NodeType_STORAGE NodeType = 2 + NodeType_UPLINK NodeType = 3 + NodeType_BOOTSTRAP NodeType = 4 // Deprecated: Do not use. +) + +var NodeType_name = map[int32]string{ + 0: "INVALID", + 1: "SATELLITE", + 2: "STORAGE", + 3: "UPLINK", + 4: "BOOTSTRAP", +} + +var NodeType_value = map[string]int32{ + "INVALID": 0, + "SATELLITE": 1, + "STORAGE": 2, + "UPLINK": 3, + "BOOTSTRAP": 4, +} + +func (x NodeType) String() string { + return proto.EnumName(NodeType_name, int32(x)) +} + +func (NodeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{0} +} + +// NodeTransport is an enum of possible transports for the overlay network +type NodeTransport int32 + +const ( + NodeTransport_TCP_TLS_GRPC NodeTransport = 0 +) + +var NodeTransport_name = map[int32]string{ + 0: "TCP_TLS_GRPC", +} + +var NodeTransport_value = map[string]int32{ + "TCP_TLS_GRPC": 0, +} + +func (x NodeTransport) String() string { + return proto.EnumName(NodeTransport_name, int32(x)) +} + +func (NodeTransport) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{1} +} + +// TODO move statdb.Update() stuff out of here +// Node represents a node in the overlay network +// Node is info for a updating a single storagenode, used in the Update rpc calls +type Node struct { + Id NodeID `protobuf:"bytes,1,opt,name=id,proto3,customtype=NodeID" json:"id"` + Address *NodeAddress `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + DeprecatedLastIp string `protobuf:"bytes,14,opt,name=deprecated_last_ip,json=deprecatedLastIp,proto3" json:"deprecated_last_ip,omitempty"` // Deprecated: Do not use. 
+ XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{0} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetAddress() *NodeAddress { + if m != nil { + return m.Address + } + return nil +} + +// Deprecated: Do not use. 
+func (m *Node) GetDeprecatedLastIp() string { + if m != nil { + return m.DeprecatedLastIp + } + return "" +} + +// NodeAddress contains the information needed to communicate with a node on the network +type NodeAddress struct { + Transport NodeTransport `protobuf:"varint,1,opt,name=transport,proto3,enum=node.NodeTransport" json:"transport,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (m *NodeAddress) String() string { return proto.CompactTextString(m) } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{1} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeAddress.Unmarshal(m, b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeAddress.Marshal(b, m, deterministic) +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return xxx_messageInfo_NodeAddress.Size(m) +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo + +func (m *NodeAddress) GetTransport() NodeTransport { + if m != nil { + return m.Transport + } + return NodeTransport_TCP_TLS_GRPC +} + +func (m *NodeAddress) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// NodeOperator contains info about the storage node operator +type NodeOperator struct { + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeOperator) Reset() { *m = NodeOperator{} } +func (m *NodeOperator) String() string { return proto.CompactTextString(m) } +func (*NodeOperator) ProtoMessage() {} +func (*NodeOperator) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{2} +} +func (m *NodeOperator) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeOperator.Unmarshal(m, b) +} +func (m *NodeOperator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeOperator.Marshal(b, m, deterministic) +} +func (m *NodeOperator) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeOperator.Merge(m, src) +} +func (m *NodeOperator) XXX_Size() int { + return xxx_messageInfo_NodeOperator.Size(m) +} +func (m *NodeOperator) XXX_DiscardUnknown() { + xxx_messageInfo_NodeOperator.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeOperator proto.InternalMessageInfo + +func (m *NodeOperator) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *NodeOperator) GetWallet() string { + if m != nil { + return m.Wallet + } + return "" +} + +// NodeCapacity contains all relevant data about a nodes ability to store data +type NodeCapacity struct { + FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` // Deprecated: Do not use. 
+ FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeCapacity) Reset() { *m = NodeCapacity{} } +func (m *NodeCapacity) String() string { return proto.CompactTextString(m) } +func (*NodeCapacity) ProtoMessage() {} +func (*NodeCapacity) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{3} +} +func (m *NodeCapacity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeCapacity.Unmarshal(m, b) +} +func (m *NodeCapacity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeCapacity.Marshal(b, m, deterministic) +} +func (m *NodeCapacity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCapacity.Merge(m, src) +} +func (m *NodeCapacity) XXX_Size() int { + return xxx_messageInfo_NodeCapacity.Size(m) +} +func (m *NodeCapacity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCapacity.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeCapacity proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *NodeCapacity) GetFreeBandwidth() int64 { + if m != nil { + return m.FreeBandwidth + } + return 0 +} + +func (m *NodeCapacity) GetFreeDisk() int64 { + if m != nil { + return m.FreeDisk + } + return 0 +} + +// Deprecated: use NodeOperator instead +type NodeMetadata struct { + Email string `protobuf:"bytes,1,opt,name=email,proto3" json:"email,omitempty"` + Wallet string `protobuf:"bytes,2,opt,name=wallet,proto3" json:"wallet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeMetadata) Reset() { *m = NodeMetadata{} } +func (m *NodeMetadata) String() string { return proto.CompactTextString(m) } +func (*NodeMetadata) ProtoMessage() {} +func (*NodeMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{4} +} +func (m *NodeMetadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeMetadata.Unmarshal(m, b) +} +func (m *NodeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeMetadata.Marshal(b, m, deterministic) +} +func (m *NodeMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetadata.Merge(m, src) +} +func (m *NodeMetadata) XXX_Size() int { + return xxx_messageInfo_NodeMetadata.Size(m) +} +func (m *NodeMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetadata proto.InternalMessageInfo + +func (m *NodeMetadata) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *NodeMetadata) GetWallet() string { + if m != nil { + return m.Wallet + } + return "" +} + +// Deprecated: use NodeCapacity instead +type NodeRestrictions struct { + FreeBandwidth int64 `protobuf:"varint,1,opt,name=free_bandwidth,json=freeBandwidth,proto3" json:"free_bandwidth,omitempty"` + FreeDisk int64 `protobuf:"varint,2,opt,name=free_disk,json=freeDisk,proto3" json:"free_disk,omitempty"` + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeRestrictions) Reset() { *m = NodeRestrictions{} } +func (m *NodeRestrictions) String() string { return proto.CompactTextString(m) } +func (*NodeRestrictions) ProtoMessage() {} +func (*NodeRestrictions) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{5} +} +func (m *NodeRestrictions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeRestrictions.Unmarshal(m, b) +} +func (m *NodeRestrictions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeRestrictions.Marshal(b, m, deterministic) +} +func (m *NodeRestrictions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeRestrictions.Merge(m, src) +} +func (m *NodeRestrictions) XXX_Size() int { + return xxx_messageInfo_NodeRestrictions.Size(m) +} +func (m *NodeRestrictions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeRestrictions.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeRestrictions proto.InternalMessageInfo + +func (m *NodeRestrictions) GetFreeBandwidth() int64 { + if m != nil { + return m.FreeBandwidth + } + return 0 +} + +func (m *NodeRestrictions) GetFreeDisk() int64 { + if m != nil { + return m.FreeDisk + } + return 0 +} + +// NodeVersion contains +type NodeVersion struct { + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` + Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Release bool `protobuf:"varint,4,opt,name=release,proto3" json:"release,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeVersion) Reset() { *m = NodeVersion{} } +func (m *NodeVersion) String() string { return proto.CompactTextString(m) } +func (*NodeVersion) 
ProtoMessage() {} +func (*NodeVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_0c843d59d2d938e7, []int{6} +} +func (m *NodeVersion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeVersion.Unmarshal(m, b) +} +func (m *NodeVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeVersion.Marshal(b, m, deterministic) +} +func (m *NodeVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeVersion.Merge(m, src) +} +func (m *NodeVersion) XXX_Size() int { + return xxx_messageInfo_NodeVersion.Size(m) +} +func (m *NodeVersion) XXX_DiscardUnknown() { + xxx_messageInfo_NodeVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeVersion proto.InternalMessageInfo + +func (m *NodeVersion) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *NodeVersion) GetCommitHash() string { + if m != nil { + return m.CommitHash + } + return "" +} + +func (m *NodeVersion) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +func (m *NodeVersion) GetRelease() bool { + if m != nil { + return m.Release + } + return false +} + +func init() { + proto.RegisterEnum("node.NodeType", NodeType_name, NodeType_value) + proto.RegisterEnum("node.NodeTransport", NodeTransport_name, NodeTransport_value) + proto.RegisterType((*Node)(nil), "node.Node") + proto.RegisterType((*NodeAddress)(nil), "node.NodeAddress") + proto.RegisterType((*NodeOperator)(nil), "node.NodeOperator") + proto.RegisterType((*NodeCapacity)(nil), "node.NodeCapacity") + proto.RegisterType((*NodeMetadata)(nil), "node.NodeMetadata") + proto.RegisterType((*NodeRestrictions)(nil), "node.NodeRestrictions") + proto.RegisterType((*NodeVersion)(nil), "node.NodeVersion") +} + +func init() { proto.RegisterFile("node.proto", fileDescriptor_0c843d59d2d938e7) } + +var fileDescriptor_0c843d59d2d938e7 = []byte{ + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0x94, 0x53, 0xcb, 0x6e, 0xdb, 0x38, + 0x14, 0x8d, 0x6c, 0xc7, 0x91, 0xe8, 0x07, 0x14, 0x4e, 0x30, 0x10, 0x3c, 0xc0, 0xd8, 0x63, 0x60, + 0x00, 0x37, 0x05, 0xec, 0x36, 0xdd, 0x76, 0x63, 0x25, 0x41, 0xea, 0xd6, 0x8d, 0x0d, 0x5a, 0xcd, + 0x22, 0x1b, 0x81, 0x16, 0x19, 0x9b, 0x8d, 0x2c, 0x12, 0x24, 0xd5, 0xc0, 0x7f, 0xd1, 0x5d, 0xff, + 0xa0, 0xdf, 0xd2, 0x6f, 0xe8, 0x22, 0xfd, 0x95, 0x82, 0x7a, 0xc4, 0x09, 0xd0, 0x4d, 0x76, 0x3a, + 0xf7, 0x9c, 0x4b, 0x1d, 0x9e, 0x7b, 0x09, 0x40, 0xc2, 0x09, 0x1d, 0x0a, 0xc9, 0x35, 0x87, 0x35, + 0xf3, 0xdd, 0x01, 0x2b, 0xbe, 0xe2, 0x79, 0xa5, 0xd3, 0x5d, 0x71, 0xbe, 0x8a, 0xe9, 0x28, 0x43, + 0xcb, 0xf4, 0x66, 0xa4, 0xd9, 0x86, 0x2a, 0x8d, 0x37, 0x22, 0x17, 0xf4, 0xbf, 0x55, 0x40, 0xed, + 0x92, 0x13, 0x0a, 0xff, 0x05, 0x15, 0x46, 0x3c, 0xab, 0x67, 0x0d, 0x9a, 0x7e, 0xfb, 0xc7, 0x7d, + 0x77, 0xef, 0xe7, 0x7d, 0xb7, 0x6e, 0x98, 0xc9, 0x19, 0xaa, 0x30, 0x02, 0x5f, 0x82, 0x03, 0x4c, + 0x88, 0xa4, 0x4a, 0x79, 0x95, 0x9e, 0x35, 0x68, 0x9c, 0x1c, 0x0e, 0xb3, 0x3f, 0x1b, 0xc9, 0x38, + 0x27, 0x50, 0xa9, 0x80, 0xaf, 0x00, 0x24, 0x54, 0x48, 0x1a, 0x61, 0x4d, 0x49, 0x18, 0x63, 0xa5, + 0x43, 0x26, 0xbc, 0x76, 0xcf, 0x1a, 0x38, 0x7e, 0xc5, 0xb3, 0x90, 0xbb, 0x63, 0xa7, 0x58, 0xe9, + 0x89, 0x78, 0x5f, 0xb3, 0xab, 0x6e, 0x1b, 0xd5, 0xf4, 0x56, 0x50, 0xd4, 0x94, 0x54, 0x69, 0xc9, + 0x22, 0xcd, 0x78, 0xa2, 0x10, 0x90, 0x54, 0xa4, 0x1a, 0x1b, 0x80, 0xec, 0x0d, 0xd5, 0x98, 0x60, + 0x8d, 0x51, 0x33, 0xc6, 0x9a, 0x26, 0xd1, 0x36, 0x8c, 0x99, 0xd2, 0xa8, 0x85, 0x53, 0xc2, 0x74, + 0xa8, 0xd2, 0x28, 0x32, 0x3e, 0xf6, 0x99, 0x0a, 0x53, 0x81, 0xda, 0xa9, 0x20, 0x58, 0xd3, 0xb0, + 0x90, 0xa2, 0xa3, 0x02, 0x3f, 0x15, 0xb7, 0x8a, 0x6a, 0x2a, 0x4c, 0x36, 0xe8, 0xe0, 0x0b, 0x95, + 0x8a, 0xf1, 0xa4, 0x7f, 0x0d, 0x1a, 0x8f, 0xee, 0x06, 0x5f, 0x03, 0x47, 0x4b, 0x9c, 0x28, 0xc1, + 0xa5, 0xce, 0x62, 0x6a, 0x9f, 0xfc, 0xb5, 0x4b, 0x20, 0x28, 0x29, 0xb4, 0x53, 0x41, 0xef, 0x69, + 0x64, 0xce, 0x43, 0x3e, 0xfd, 0xb7, 0xa0, 0x69, 0xba, 0x66, 0x82, 0x4a, 0xac, 
0xb9, 0x84, 0x47, + 0x60, 0x9f, 0x6e, 0x30, 0x8b, 0xb3, 0x83, 0x1d, 0x94, 0x03, 0xf8, 0x37, 0xa8, 0xdf, 0xe1, 0x38, + 0xa6, 0xba, 0x68, 0x2f, 0x50, 0xff, 0x2a, 0xef, 0x3e, 0xc5, 0x02, 0x47, 0x4c, 0x6f, 0xe1, 0x0b, + 0xd0, 0xbe, 0x91, 0x94, 0x86, 0x4b, 0x9c, 0x90, 0x3b, 0x46, 0xf4, 0x3a, 0x3b, 0xa6, 0x9a, 0x25, + 0xdd, 0x32, 0x8c, 0x5f, 0x12, 0xf0, 0x1f, 0xe0, 0x64, 0x52, 0xc2, 0xd4, 0x6d, 0x76, 0x6a, 0x15, + 0xd9, 0xa6, 0x70, 0xc6, 0xd4, 0x6d, 0xe9, 0xea, 0x63, 0x91, 0xf1, 0xb3, 0x5d, 0xb9, 0xa6, 0x1b, + 0x3d, 0x9a, 0x1d, 0xfc, 0xff, 0xcf, 0xce, 0x9e, 0xe5, 0xea, 0xbb, 0x95, 0x0f, 0xe2, 0x2a, 0x9f, + 0x8b, 0x49, 0xb5, 0x18, 0x51, 0xe1, 0xab, 0x84, 0xb0, 0x0b, 0x1a, 0x11, 0xdf, 0x6c, 0x98, 0x0e, + 0xd7, 0x58, 0xad, 0x0b, 0x7b, 0x20, 0x2f, 0xbd, 0xc3, 0x6a, 0x0d, 0x7d, 0xe0, 0x3c, 0xec, 0xbf, + 0x57, 0xcd, 0xb6, 0xb8, 0x33, 0xcc, 0x5f, 0xc8, 0xb0, 0x7c, 0x21, 0xc3, 0xa0, 0x54, 0xf8, 0xb6, + 0x79, 0x06, 0x5f, 0x7f, 0x75, 0x2d, 0xb4, 0x6b, 0x33, 0xbf, 0x97, 0x34, 0xa6, 0x58, 0x51, 0xaf, + 0xd6, 0xb3, 0x06, 0x36, 0x2a, 0xe1, 0x31, 0x02, 0x76, 0xb6, 0x0a, 0x5b, 0x41, 0x61, 0x03, 0x1c, + 0x4c, 0x2e, 0xaf, 0xc6, 0xd3, 0xc9, 0x99, 0xbb, 0x07, 0x5b, 0xc0, 0x59, 0x8c, 0x83, 0xf3, 0xe9, + 0x74, 0x12, 0x9c, 0xbb, 0x96, 0xe1, 0x16, 0xc1, 0x0c, 0x8d, 0x2f, 0xce, 0xdd, 0x0a, 0x04, 0xa0, + 0xfe, 0x69, 0x3e, 0x9d, 0x5c, 0x7e, 0x70, 0xab, 0xf0, 0x10, 0x38, 0xfe, 0x6c, 0x16, 0x2c, 0x02, + 0x34, 0x9e, 0xbb, 0xb5, 0x4e, 0xc5, 0xb6, 0x8e, 0xff, 0x03, 0xad, 0x27, 0xeb, 0x05, 0x5d, 0xd0, + 0x0c, 0x4e, 0xe7, 0x61, 0x30, 0x5d, 0x84, 0x17, 0x68, 0x7e, 0xea, 0xee, 0xf9, 0x47, 0xd7, 0x50, + 0x69, 0x2e, 0x3f, 0x0f, 0x19, 0x1f, 0x99, 0xbb, 0xf2, 0x64, 0x24, 0x96, 0xcb, 0x7a, 0x76, 0x9f, + 0x37, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2e, 0x80, 0xb8, 0x51, 0x1f, 0x04, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/node.proto b/vendor/storj.io/common/pb/node.proto new file mode 100644 index 000000000..e6ec958e1 --- /dev/null +++ b/vendor/storj.io/common/pb/node.proto @@ -0,0 +1,73 @@ +// 
Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package node; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +// TODO move statdb.Update() stuff out of here +// Node represents a node in the overlay network +// Node is info for a updating a single storagenode, used in the Update rpc calls +message Node { + bytes id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + NodeAddress address = 2; + reserved 3 to 13; + string deprecated_last_ip = 14 [deprecated=true]; + reserved "type", "restrictions", "reputation", "metadata", "latency_list", "audit_success", "is_up", "update_latency", "update_audit_success", "update_uptime", "version"; +} + +// NodeType is an enum of possible node types +enum NodeType { + INVALID = 0; + SATELLITE = 1; + STORAGE = 2; + UPLINK = 3; + BOOTSTRAP = 4 [deprecated=true]; +} + +// NodeAddress contains the information needed to communicate with a node on the network +message NodeAddress { + NodeTransport transport = 1; + string address = 2; +} + +// NodeTransport is an enum of possible transports for the overlay network +enum NodeTransport { + TCP_TLS_GRPC = 0; +} + +// NodeOperator contains info about the storage node operator +message NodeOperator { + string email = 1; + string wallet = 2; +} + +// NodeCapacity contains all relevant data about a nodes ability to store data +message NodeCapacity { + int64 free_bandwidth = 1 [deprecated=true]; + int64 free_disk = 2; +} + +// Deprecated: use NodeOperator instead +message NodeMetadata { + string email = 1; + string wallet = 2; +} + +// Deprecated: use NodeCapacity instead +message NodeRestrictions { + int64 free_bandwidth = 1; + int64 free_disk = 2; +} + +// NodeVersion contains +message NodeVersion { + string version = 1; // must be semver formatted + string commit_hash = 2; + google.protobuf.Timestamp timestamp = 3 [(gogoproto.stdtime) = true, 
(gogoproto.nullable) = false]; + bool release = 4; +} diff --git a/vendor/storj.io/common/pb/nodestats.pb.go b/vendor/storj.io/common/pb/nodestats.pb.go new file mode 100644 index 000000000..1c447d8e1 --- /dev/null +++ b/vendor/storj.io/common/pb/nodestats.pb.go @@ -0,0 +1,628 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nodestats.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type ReputationStats struct { + TotalCount int64 `protobuf:"varint,1,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty"` + SuccessCount int64 `protobuf:"varint,2,opt,name=success_count,json=successCount,proto3" json:"success_count,omitempty"` + ReputationAlpha float64 `protobuf:"fixed64,3,opt,name=reputation_alpha,json=reputationAlpha,proto3" json:"reputation_alpha,omitempty"` + ReputationBeta float64 `protobuf:"fixed64,4,opt,name=reputation_beta,json=reputationBeta,proto3" json:"reputation_beta,omitempty"` + ReputationScore float64 `protobuf:"fixed64,5,opt,name=reputation_score,json=reputationScore,proto3" json:"reputation_score,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReputationStats) Reset() { *m = ReputationStats{} } +func (m *ReputationStats) String() string { return proto.CompactTextString(m) } +func (*ReputationStats) ProtoMessage() {} +func 
(*ReputationStats) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{0} +} +func (m *ReputationStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReputationStats.Unmarshal(m, b) +} +func (m *ReputationStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReputationStats.Marshal(b, m, deterministic) +} +func (m *ReputationStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReputationStats.Merge(m, src) +} +func (m *ReputationStats) XXX_Size() int { + return xxx_messageInfo_ReputationStats.Size(m) +} +func (m *ReputationStats) XXX_DiscardUnknown() { + xxx_messageInfo_ReputationStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ReputationStats proto.InternalMessageInfo + +func (m *ReputationStats) GetTotalCount() int64 { + if m != nil { + return m.TotalCount + } + return 0 +} + +func (m *ReputationStats) GetSuccessCount() int64 { + if m != nil { + return m.SuccessCount + } + return 0 +} + +func (m *ReputationStats) GetReputationAlpha() float64 { + if m != nil { + return m.ReputationAlpha + } + return 0 +} + +func (m *ReputationStats) GetReputationBeta() float64 { + if m != nil { + return m.ReputationBeta + } + return 0 +} + +func (m *ReputationStats) GetReputationScore() float64 { + if m != nil { + return m.ReputationScore + } + return 0 +} + +type GetStatsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStatsRequest) Reset() { *m = GetStatsRequest{} } +func (m *GetStatsRequest) String() string { return proto.CompactTextString(m) } +func (*GetStatsRequest) ProtoMessage() {} +func (*GetStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{1} +} +func (m *GetStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStatsRequest.Unmarshal(m, b) +} +func (m *GetStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_GetStatsRequest.Marshal(b, m, deterministic) +} +func (m *GetStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStatsRequest.Merge(m, src) +} +func (m *GetStatsRequest) XXX_Size() int { + return xxx_messageInfo_GetStatsRequest.Size(m) +} +func (m *GetStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStatsRequest proto.InternalMessageInfo + +type GetStatsResponse struct { + UptimeCheck *ReputationStats `protobuf:"bytes,1,opt,name=uptime_check,json=uptimeCheck,proto3" json:"uptime_check,omitempty"` + AuditCheck *ReputationStats `protobuf:"bytes,2,opt,name=audit_check,json=auditCheck,proto3" json:"audit_check,omitempty"` + Disqualified *time.Time `protobuf:"bytes,3,opt,name=disqualified,proto3,stdtime" json:"disqualified,omitempty"` + Suspended *time.Time `protobuf:"bytes,4,opt,name=suspended,proto3,stdtime" json:"suspended,omitempty"` + JoinedAt time.Time `protobuf:"bytes,5,opt,name=joined_at,json=joinedAt,proto3,stdtime" json:"joined_at"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetStatsResponse) Reset() { *m = GetStatsResponse{} } +func (m *GetStatsResponse) String() string { return proto.CompactTextString(m) } +func (*GetStatsResponse) ProtoMessage() {} +func (*GetStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{2} +} +func (m *GetStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetStatsResponse.Unmarshal(m, b) +} +func (m *GetStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetStatsResponse.Marshal(b, m, deterministic) +} +func (m *GetStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetStatsResponse.Merge(m, src) +} +func (m *GetStatsResponse) XXX_Size() int { + return xxx_messageInfo_GetStatsResponse.Size(m) +} +func (m *GetStatsResponse) 
XXX_DiscardUnknown() { + xxx_messageInfo_GetStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetStatsResponse proto.InternalMessageInfo + +func (m *GetStatsResponse) GetUptimeCheck() *ReputationStats { + if m != nil { + return m.UptimeCheck + } + return nil +} + +func (m *GetStatsResponse) GetAuditCheck() *ReputationStats { + if m != nil { + return m.AuditCheck + } + return nil +} + +func (m *GetStatsResponse) GetDisqualified() *time.Time { + if m != nil { + return m.Disqualified + } + return nil +} + +func (m *GetStatsResponse) GetSuspended() *time.Time { + if m != nil { + return m.Suspended + } + return nil +} + +func (m *GetStatsResponse) GetJoinedAt() time.Time { + if m != nil { + return m.JoinedAt + } + return time.Time{} +} + +type DailyStorageUsageRequest struct { + From time.Time `protobuf:"bytes,1,opt,name=from,proto3,stdtime" json:"from"` + To time.Time `protobuf:"bytes,2,opt,name=to,proto3,stdtime" json:"to"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DailyStorageUsageRequest) Reset() { *m = DailyStorageUsageRequest{} } +func (m *DailyStorageUsageRequest) String() string { return proto.CompactTextString(m) } +func (*DailyStorageUsageRequest) ProtoMessage() {} +func (*DailyStorageUsageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{3} +} +func (m *DailyStorageUsageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DailyStorageUsageRequest.Unmarshal(m, b) +} +func (m *DailyStorageUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DailyStorageUsageRequest.Marshal(b, m, deterministic) +} +func (m *DailyStorageUsageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DailyStorageUsageRequest.Merge(m, src) +} +func (m *DailyStorageUsageRequest) XXX_Size() int { + return xxx_messageInfo_DailyStorageUsageRequest.Size(m) +} +func (m *DailyStorageUsageRequest) 
XXX_DiscardUnknown() { + xxx_messageInfo_DailyStorageUsageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DailyStorageUsageRequest proto.InternalMessageInfo + +func (m *DailyStorageUsageRequest) GetFrom() time.Time { + if m != nil { + return m.From + } + return time.Time{} +} + +func (m *DailyStorageUsageRequest) GetTo() time.Time { + if m != nil { + return m.To + } + return time.Time{} +} + +type DailyStorageUsageResponse struct { + NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + DailyStorageUsage []*DailyStorageUsageResponse_StorageUsage `protobuf:"bytes,2,rep,name=daily_storage_usage,json=dailyStorageUsage,proto3" json:"daily_storage_usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DailyStorageUsageResponse) Reset() { *m = DailyStorageUsageResponse{} } +func (m *DailyStorageUsageResponse) String() string { return proto.CompactTextString(m) } +func (*DailyStorageUsageResponse) ProtoMessage() {} +func (*DailyStorageUsageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{4} +} +func (m *DailyStorageUsageResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DailyStorageUsageResponse.Unmarshal(m, b) +} +func (m *DailyStorageUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DailyStorageUsageResponse.Marshal(b, m, deterministic) +} +func (m *DailyStorageUsageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DailyStorageUsageResponse.Merge(m, src) +} +func (m *DailyStorageUsageResponse) XXX_Size() int { + return xxx_messageInfo_DailyStorageUsageResponse.Size(m) +} +func (m *DailyStorageUsageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DailyStorageUsageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DailyStorageUsageResponse proto.InternalMessageInfo + +func (m *DailyStorageUsageResponse) 
GetDailyStorageUsage() []*DailyStorageUsageResponse_StorageUsage { + if m != nil { + return m.DailyStorageUsage + } + return nil +} + +type DailyStorageUsageResponse_StorageUsage struct { + AtRestTotal float64 `protobuf:"fixed64,1,opt,name=at_rest_total,json=atRestTotal,proto3" json:"at_rest_total,omitempty"` + Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DailyStorageUsageResponse_StorageUsage) Reset() { + *m = DailyStorageUsageResponse_StorageUsage{} +} +func (m *DailyStorageUsageResponse_StorageUsage) String() string { return proto.CompactTextString(m) } +func (*DailyStorageUsageResponse_StorageUsage) ProtoMessage() {} +func (*DailyStorageUsageResponse_StorageUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{4, 0} +} +func (m *DailyStorageUsageResponse_StorageUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DailyStorageUsageResponse_StorageUsage.Unmarshal(m, b) +} +func (m *DailyStorageUsageResponse_StorageUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DailyStorageUsageResponse_StorageUsage.Marshal(b, m, deterministic) +} +func (m *DailyStorageUsageResponse_StorageUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_DailyStorageUsageResponse_StorageUsage.Merge(m, src) +} +func (m *DailyStorageUsageResponse_StorageUsage) XXX_Size() int { + return xxx_messageInfo_DailyStorageUsageResponse_StorageUsage.Size(m) +} +func (m *DailyStorageUsageResponse_StorageUsage) XXX_DiscardUnknown() { + xxx_messageInfo_DailyStorageUsageResponse_StorageUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_DailyStorageUsageResponse_StorageUsage proto.InternalMessageInfo + +func (m *DailyStorageUsageResponse_StorageUsage) GetAtRestTotal() float64 { + if m != nil { + return m.AtRestTotal + } + return 0 +} + +func (m 
*DailyStorageUsageResponse_StorageUsage) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + return time.Time{} +} + +type PricingModelRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PricingModelRequest) Reset() { *m = PricingModelRequest{} } +func (m *PricingModelRequest) String() string { return proto.CompactTextString(m) } +func (*PricingModelRequest) ProtoMessage() {} +func (*PricingModelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{5} +} +func (m *PricingModelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PricingModelRequest.Unmarshal(m, b) +} +func (m *PricingModelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PricingModelRequest.Marshal(b, m, deterministic) +} +func (m *PricingModelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PricingModelRequest.Merge(m, src) +} +func (m *PricingModelRequest) XXX_Size() int { + return xxx_messageInfo_PricingModelRequest.Size(m) +} +func (m *PricingModelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PricingModelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PricingModelRequest proto.InternalMessageInfo + +type PricingModelResponse struct { + EgressBandwidthPrice int64 `protobuf:"varint,1,opt,name=egress_bandwidth_price,json=egressBandwidthPrice,proto3" json:"egress_bandwidth_price,omitempty"` + RepairBandwidthPrice int64 `protobuf:"varint,2,opt,name=repair_bandwidth_price,json=repairBandwidthPrice,proto3" json:"repair_bandwidth_price,omitempty"` + DiskSpacePrice int64 `protobuf:"varint,3,opt,name=disk_space_price,json=diskSpacePrice,proto3" json:"disk_space_price,omitempty"` + AuditBandwidthPrice int64 `protobuf:"varint,4,opt,name=audit_bandwidth_price,json=auditBandwidthPrice,proto3" json:"audit_bandwidth_price,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PricingModelResponse) Reset() { *m = PricingModelResponse{} } +func (m *PricingModelResponse) String() string { return proto.CompactTextString(m) } +func (*PricingModelResponse) ProtoMessage() {} +func (*PricingModelResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e0b184ee117142aa, []int{6} +} +func (m *PricingModelResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PricingModelResponse.Unmarshal(m, b) +} +func (m *PricingModelResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PricingModelResponse.Marshal(b, m, deterministic) +} +func (m *PricingModelResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PricingModelResponse.Merge(m, src) +} +func (m *PricingModelResponse) XXX_Size() int { + return xxx_messageInfo_PricingModelResponse.Size(m) +} +func (m *PricingModelResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PricingModelResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PricingModelResponse proto.InternalMessageInfo + +func (m *PricingModelResponse) GetEgressBandwidthPrice() int64 { + if m != nil { + return m.EgressBandwidthPrice + } + return 0 +} + +func (m *PricingModelResponse) GetRepairBandwidthPrice() int64 { + if m != nil { + return m.RepairBandwidthPrice + } + return 0 +} + +func (m *PricingModelResponse) GetDiskSpacePrice() int64 { + if m != nil { + return m.DiskSpacePrice + } + return 0 +} + +func (m *PricingModelResponse) GetAuditBandwidthPrice() int64 { + if m != nil { + return m.AuditBandwidthPrice + } + return 0 +} + +func init() { + proto.RegisterType((*ReputationStats)(nil), "nodestats.ReputationStats") + proto.RegisterType((*GetStatsRequest)(nil), "nodestats.GetStatsRequest") + proto.RegisterType((*GetStatsResponse)(nil), "nodestats.GetStatsResponse") + proto.RegisterType((*DailyStorageUsageRequest)(nil), "nodestats.DailyStorageUsageRequest") + 
proto.RegisterType((*DailyStorageUsageResponse)(nil), "nodestats.DailyStorageUsageResponse") + proto.RegisterType((*DailyStorageUsageResponse_StorageUsage)(nil), "nodestats.DailyStorageUsageResponse.StorageUsage") + proto.RegisterType((*PricingModelRequest)(nil), "nodestats.PricingModelRequest") + proto.RegisterType((*PricingModelResponse)(nil), "nodestats.PricingModelResponse") +} + +func init() { proto.RegisterFile("nodestats.proto", fileDescriptor_e0b184ee117142aa) } + +var fileDescriptor_e0b184ee117142aa = []byte{ + // 691 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x4e, 0xdb, 0x4a, + 0x18, 0xbd, 0x76, 0xb8, 0x5c, 0xf2, 0x25, 0x10, 0x18, 0xc2, 0x55, 0x6e, 0xae, 0xd4, 0xa0, 0x50, + 0x89, 0x74, 0x93, 0xa8, 0x29, 0x8b, 0x4a, 0x55, 0x17, 0x04, 0xa4, 0x96, 0x45, 0x7f, 0xe4, 0xd0, + 0x4d, 0x17, 0xb5, 0x26, 0x9e, 0x0f, 0x33, 0x90, 0x78, 0x8c, 0x67, 0xdc, 0xaa, 0xcb, 0x6e, 0xbb, + 0xea, 0x1b, 0xf4, 0x75, 0xfa, 0x04, 0x5d, 0x74, 0x01, 0x7d, 0x94, 0x6a, 0x66, 0x9c, 0x3f, 0x03, + 0x25, 0x5d, 0xe6, 0x7c, 0xe7, 0x9c, 0x8c, 0xcf, 0x99, 0x6f, 0xa0, 0x12, 0x09, 0x86, 0x52, 0x51, + 0x25, 0xdb, 0x71, 0x22, 0x94, 0x20, 0xc5, 0x09, 0x50, 0x87, 0x50, 0x84, 0xc2, 0xc2, 0xf5, 0x46, + 0x28, 0x44, 0x38, 0xc4, 0x8e, 0xf9, 0x35, 0x48, 0x4f, 0x3a, 0x8a, 0x8f, 0x34, 0x6d, 0x14, 0x5b, + 0x42, 0xf3, 0xbb, 0x03, 0x15, 0x0f, 0xe3, 0x54, 0x51, 0xc5, 0x45, 0xd4, 0xd7, 0x06, 0xa4, 0x01, + 0x25, 0x25, 0x14, 0x1d, 0xfa, 0x81, 0x48, 0x23, 0x55, 0x73, 0xb6, 0x9d, 0x56, 0xc1, 0x03, 0x03, + 0x1d, 0x68, 0x84, 0xec, 0xc0, 0xaa, 0x4c, 0x83, 0x00, 0xa5, 0xcc, 0x28, 0xae, 0xa1, 0x94, 0x33, + 0xd0, 0x92, 0x1e, 0xc0, 0x7a, 0x32, 0x31, 0xf6, 0xe9, 0x30, 0x3e, 0xa5, 0xb5, 0xc2, 0xb6, 0xd3, + 0x72, 0xbc, 0xca, 0x14, 0xdf, 0xd7, 0x30, 0xd9, 0x85, 0x19, 0xc8, 0x1f, 0xa0, 0xa2, 0xb5, 0x25, + 0xc3, 0x5c, 0x9b, 0xc2, 0x3d, 0x54, 0x34, 0xe7, 0x29, 0x03, 0x91, 0x60, 0xed, 0xef, 0xbc, 0x67, + 0x5f, 0xc3, 0xcd, 0x0d, 0xa8, 0x3c, 0x43, 0x65, 0x3e, 
0xc8, 0xc3, 0x8b, 0x14, 0xa5, 0x6a, 0x5e, + 0xb9, 0xb0, 0x3e, 0xc5, 0x64, 0x2c, 0x22, 0x89, 0xe4, 0x29, 0x94, 0xd3, 0x58, 0xa7, 0xe2, 0x07, + 0xa7, 0x18, 0x9c, 0x9b, 0xaf, 0x2d, 0x75, 0xeb, 0xed, 0x69, 0xc0, 0xb9, 0x78, 0xbc, 0x92, 0xe5, + 0x1f, 0x68, 0x3a, 0x79, 0x02, 0x25, 0x9a, 0x32, 0xae, 0x32, 0xb5, 0x7b, 0xa7, 0x1a, 0x0c, 0xdd, + 0x8a, 0x9f, 0x43, 0x99, 0x71, 0x79, 0x91, 0xd2, 0x21, 0x3f, 0xe1, 0xc8, 0x4c, 0x3c, 0x5a, 0x6d, + 0x4b, 0x6b, 0x8f, 0x4b, 0x6b, 0x1f, 0x8f, 0x4b, 0xeb, 0xad, 0x7c, 0xbb, 0x6c, 0x38, 0x5f, 0xae, + 0x1a, 0x8e, 0x37, 0xa7, 0x24, 0x3d, 0x28, 0xca, 0x54, 0xc6, 0x18, 0x31, 0x64, 0x26, 0xbb, 0x45, + 0x6d, 0xa6, 0x32, 0xb2, 0x0f, 0xc5, 0x33, 0xc1, 0x23, 0x64, 0x3e, 0x55, 0x26, 0xd5, 0xbb, 0x3d, + 0xfe, 0x32, 0x1e, 0x2b, 0x56, 0xb6, 0xaf, 0x9a, 0x9f, 0x1d, 0xa8, 0x1d, 0x52, 0x3e, 0xfc, 0xd8, + 0x57, 0x22, 0xa1, 0x21, 0xbe, 0x91, 0x34, 0xc4, 0x2c, 0x7e, 0xf2, 0x18, 0x96, 0x4e, 0x12, 0x31, + 0x9a, 0x24, 0xbc, 0x88, 0xb5, 0x51, 0x90, 0x3d, 0x70, 0x95, 0x98, 0x64, 0xbb, 0x88, 0xce, 0x55, + 0xa2, 0xf9, 0xd5, 0x85, 0xff, 0x6e, 0x38, 0x4c, 0xd6, 0xfb, 0x2e, 0xfc, 0xa3, 0x4b, 0xf2, 0x39, + 0x33, 0x07, 0x2a, 0xf7, 0xd6, 0xb4, 0xf8, 0xc7, 0x65, 0x63, 0xf9, 0xa5, 0x60, 0x78, 0x74, 0xe8, + 0x2d, 0xeb, 0xf1, 0x11, 0x23, 0x14, 0x36, 0x99, 0x76, 0xf1, 0xa5, 0xb5, 0xf1, 0x53, 0xed, 0x53, + 0x73, 0xb7, 0x0b, 0xad, 0x52, 0xf7, 0xe1, 0x4c, 0xd3, 0xb7, 0xfe, 0x57, 0x7b, 0x0e, 0xdc, 0x60, + 0x79, 0x5e, 0xfd, 0x3d, 0x94, 0x67, 0x7f, 0x93, 0x26, 0xac, 0x52, 0xe5, 0x27, 0x28, 0x95, 0x6f, + 0xb6, 0xce, 0x9c, 0xd0, 0xf1, 0x4a, 0x54, 0x79, 0x28, 0xd5, 0xb1, 0x86, 0x74, 0xe3, 0x93, 0x5d, + 0xfe, 0xa3, 0x68, 0xa6, 0xb2, 0xe6, 0x16, 0x6c, 0xbe, 0x4e, 0x78, 0xc0, 0xa3, 0xf0, 0x85, 0x60, + 0x38, 0x1c, 0xef, 0xc9, 0x4f, 0x07, 0xaa, 0xf3, 0x78, 0x96, 0xd9, 0x1e, 0xfc, 0x8b, 0x61, 0xa2, + 0xd7, 0x7e, 0x40, 0x23, 0xf6, 0x81, 0x33, 0x75, 0xea, 0xc7, 0x09, 0x0f, 0x30, 0x7b, 0x23, 0xaa, + 0x76, 0xda, 0x1b, 0x0f, 0xb5, 0x89, 0x51, 0x25, 0x18, 0x53, 0x9e, 0x5c, 0x53, 0xd9, 0x67, 
0xa3, + 0x6a, 0xa7, 0x39, 0x55, 0x0b, 0xd6, 0x19, 0x97, 0xe7, 0xbe, 0x8c, 0x69, 0x80, 0x19, 0xbf, 0x60, + 0xf8, 0x6b, 0x1a, 0xef, 0x6b, 0xd8, 0x32, 0xbb, 0xb0, 0x65, 0x57, 0x30, 0x6f, 0xbf, 0x64, 0xe8, + 0x9b, 0x66, 0x38, 0xef, 0xde, 0xfd, 0xe4, 0x42, 0x51, 0xf7, 0x6c, 0x1f, 0xbc, 0x03, 0x58, 0x19, + 0xbf, 0x0b, 0x64, 0x76, 0x77, 0x73, 0x0f, 0x48, 0xfd, 0xff, 0x1b, 0x67, 0x59, 0x38, 0xef, 0x60, + 0xe3, 0xda, 0x0d, 0x20, 0x3b, 0xbf, 0xbf, 0x1f, 0xd6, 0xf6, 0xfe, 0x22, 0x97, 0x88, 0xbc, 0x82, + 0xf2, 0x6c, 0x29, 0xe4, 0xde, 0x8c, 0xea, 0x86, 0x16, 0xeb, 0x8d, 0x5b, 0xe7, 0xd6, 0xb0, 0x57, + 0x7d, 0x4b, 0xf4, 0x95, 0x3e, 0x6b, 0x73, 0xd1, 0x09, 0xc4, 0x68, 0x24, 0xa2, 0x4e, 0x3c, 0x18, + 0x2c, 0x9b, 0xcb, 0xf3, 0xe8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x8d, 0x3b, 0x4a, 0x62, + 0x06, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCNodeStatsClient interface { + DRPCConn() drpc.Conn + + GetStats(ctx context.Context, in *GetStatsRequest) (*GetStatsResponse, error) + DailyStorageUsage(ctx context.Context, in *DailyStorageUsageRequest) (*DailyStorageUsageResponse, error) + PricingModel(ctx context.Context, in *PricingModelRequest) (*PricingModelResponse, error) +} + +type drpcNodeStatsClient struct { + cc drpc.Conn +} + +func NewDRPCNodeStatsClient(cc drpc.Conn) DRPCNodeStatsClient { + return &drpcNodeStatsClient{cc} +} + +func (c *drpcNodeStatsClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcNodeStatsClient) GetStats(ctx context.Context, in *GetStatsRequest) (*GetStatsResponse, error) { + out := new(GetStatsResponse) + err := c.cc.Invoke(ctx, "/nodestats.NodeStats/GetStats", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcNodeStatsClient) DailyStorageUsage(ctx context.Context, in *DailyStorageUsageRequest) (*DailyStorageUsageResponse, error) { + out := new(DailyStorageUsageResponse) + err := c.cc.Invoke(ctx, "/nodestats.NodeStats/DailyStorageUsage", in, out) + if err != nil { + return nil, err + } + return out, 
nil +} + +func (c *drpcNodeStatsClient) PricingModel(ctx context.Context, in *PricingModelRequest) (*PricingModelResponse, error) { + out := new(PricingModelResponse) + err := c.cc.Invoke(ctx, "/nodestats.NodeStats/PricingModel", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCNodeStatsServer interface { + GetStats(context.Context, *GetStatsRequest) (*GetStatsResponse, error) + DailyStorageUsage(context.Context, *DailyStorageUsageRequest) (*DailyStorageUsageResponse, error) + PricingModel(context.Context, *PricingModelRequest) (*PricingModelResponse, error) +} + +type DRPCNodeStatsDescription struct{} + +func (DRPCNodeStatsDescription) NumMethods() int { return 3 } + +func (DRPCNodeStatsDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/nodestats.NodeStats/GetStats", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeStatsServer). + GetStats( + ctx, + in1.(*GetStatsRequest), + ) + }, DRPCNodeStatsServer.GetStats, true + case 1: + return "/nodestats.NodeStats/DailyStorageUsage", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeStatsServer). + DailyStorageUsage( + ctx, + in1.(*DailyStorageUsageRequest), + ) + }, DRPCNodeStatsServer.DailyStorageUsage, true + case 2: + return "/nodestats.NodeStats/PricingModel", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCNodeStatsServer). 
+ PricingModel( + ctx, + in1.(*PricingModelRequest), + ) + }, DRPCNodeStatsServer.PricingModel, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterNodeStats(mux drpc.Mux, impl DRPCNodeStatsServer) error { + return mux.Register(impl, DRPCNodeStatsDescription{}) +} + +type DRPCNodeStats_GetStatsStream interface { + drpc.Stream + SendAndClose(*GetStatsResponse) error +} + +type drpcNodeStatsGetStatsStream struct { + drpc.Stream +} + +func (x *drpcNodeStatsGetStatsStream) SendAndClose(m *GetStatsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNodeStats_DailyStorageUsageStream interface { + drpc.Stream + SendAndClose(*DailyStorageUsageResponse) error +} + +type drpcNodeStatsDailyStorageUsageStream struct { + drpc.Stream +} + +func (x *drpcNodeStatsDailyStorageUsageStream) SendAndClose(m *DailyStorageUsageResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCNodeStats_PricingModelStream interface { + drpc.Stream + SendAndClose(*PricingModelResponse) error +} + +type drpcNodeStatsPricingModelStream struct { + drpc.Stream +} + +func (x *drpcNodeStatsPricingModelStream) SendAndClose(m *PricingModelResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/nodestats.proto b/vendor/storj.io/common/pb/nodestats.proto new file mode 100644 index 000000000..6b8fb9e06 --- /dev/null +++ b/vendor/storj.io/common/pb/nodestats.proto @@ -0,0 +1,58 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package nodestats; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +service NodeStats { + rpc GetStats(GetStatsRequest) returns (GetStatsResponse); + rpc DailyStorageUsage(DailyStorageUsageRequest) returns (DailyStorageUsageResponse); + rpc PricingModel(PricingModelRequest) returns (PricingModelResponse); +} + +message ReputationStats { + int64 total_count = 1; + int64 success_count = 2; + double reputation_alpha = 3; + double reputation_beta = 4; + double reputation_score = 5; +} + +message GetStatsRequest {} + +message GetStatsResponse { + ReputationStats uptime_check = 1; + ReputationStats audit_check = 2; + google.protobuf.Timestamp disqualified = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; + google.protobuf.Timestamp suspended = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = true]; + google.protobuf.Timestamp joined_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message DailyStorageUsageRequest { + google.protobuf.Timestamp from = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp to = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message DailyStorageUsageResponse { + message StorageUsage { + double at_rest_total = 1; + google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + } + + bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + repeated StorageUsage daily_storage_usage = 2; +} + +message PricingModelRequest {} + +message PricingModelResponse { + int64 egress_bandwidth_price = 1; + int64 repair_bandwidth_price = 2; + int64 disk_space_price = 3; + int64 audit_bandwidth_price = 4; +} diff --git a/vendor/storj.io/common/pb/orders.pb.go b/vendor/storj.io/common/pb/orders.pb.go new file mode 100644 index 000000000..a483f0fbe --- /dev/null +++ b/vendor/storj.io/common/pb/orders.pb.go @@ 
-0,0 +1,821 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: orders.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PieceAction is an enumeration of all possible executed actions on storage node +type PieceAction int32 + +const ( + PieceAction_INVALID PieceAction = 0 + PieceAction_PUT PieceAction = 1 + PieceAction_GET PieceAction = 2 + PieceAction_GET_AUDIT PieceAction = 3 + PieceAction_GET_REPAIR PieceAction = 4 + PieceAction_PUT_REPAIR PieceAction = 5 + PieceAction_DELETE PieceAction = 6 + PieceAction_PUT_GRACEFUL_EXIT PieceAction = 7 +) + +var PieceAction_name = map[int32]string{ + 0: "INVALID", + 1: "PUT", + 2: "GET", + 3: "GET_AUDIT", + 4: "GET_REPAIR", + 5: "PUT_REPAIR", + 6: "DELETE", + 7: "PUT_GRACEFUL_EXIT", +} + +var PieceAction_value = map[string]int32{ + "INVALID": 0, + "PUT": 1, + "GET": 2, + "GET_AUDIT": 3, + "GET_REPAIR": 4, + "PUT_REPAIR": 5, + "DELETE": 6, + "PUT_GRACEFUL_EXIT": 7, +} + +func (x PieceAction) String() string { + return proto.EnumName(PieceAction_name, int32(x)) +} + +func (PieceAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{0} +} + +type SettlementResponse_Status int32 + +const ( + SettlementResponse_INVALID SettlementResponse_Status = 0 + SettlementResponse_ACCEPTED SettlementResponse_Status = 1 + SettlementResponse_REJECTED SettlementResponse_Status = 2 +) + +var 
SettlementResponse_Status_name = map[int32]string{ + 0: "INVALID", + 1: "ACCEPTED", + 2: "REJECTED", +} + +var SettlementResponse_Status_value = map[string]int32{ + "INVALID": 0, + "ACCEPTED": 1, + "REJECTED": 2, +} + +func (x SettlementResponse_Status) String() string { + return proto.EnumName(SettlementResponse_Status_name, int32(x)) +} + +func (SettlementResponse_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{7, 0} +} + +// OrderLimit is provided by satellite to execute specific action on storage node within some limits +type OrderLimit struct { + // unique serial to avoid replay attacks + SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` + // satellite who issued this order limit allowing orderer to do the specified action + SatelliteId NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` + // uplink who requested or whom behalf the order limit to do an action + DeprecatedUplinkId *NodeID `protobuf:"bytes,3,opt,name=deprecated_uplink_id,json=deprecatedUplinkId,proto3,customtype=NodeID" json:"deprecated_uplink_id,omitempty"` + // public key that will be used to sign orders and piece hash + UplinkPublicKey PiecePublicKey `protobuf:"bytes,13,opt,name=uplink_public_key,json=uplinkPublicKey,proto3,customtype=PiecePublicKey" json:"uplink_public_key"` + // storage node who can re claimthe order limit specified by serial + StorageNodeId NodeID `protobuf:"bytes,4,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"` + // piece which is allowed to be touched + PieceId PieceID `protobuf:"bytes,5,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"` + // limit in bytes how much can be changed + Limit int64 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + Action PieceAction 
`protobuf:"varint,7,opt,name=action,proto3,enum=orders.PieceAction" json:"action,omitempty"` + PieceExpiration time.Time `protobuf:"bytes,8,opt,name=piece_expiration,json=pieceExpiration,proto3,stdtime" json:"piece_expiration"` + OrderExpiration time.Time `protobuf:"bytes,9,opt,name=order_expiration,json=orderExpiration,proto3,stdtime" json:"order_expiration"` + OrderCreation time.Time `protobuf:"bytes,12,opt,name=order_creation,json=orderCreation,proto3,stdtime" json:"order_creation"` + SatelliteSignature []byte `protobuf:"bytes,10,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"` + // satellites aren't necessarily discoverable in kademlia. this allows + // a storage node to find a satellite and handshake with it to get its key. + SatelliteAddress *NodeAddress `protobuf:"bytes,11,opt,name=satellite_address,json=satelliteAddress,proto3" json:"satellite_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OrderLimit) Reset() { *m = OrderLimit{} } +func (m *OrderLimit) String() string { return proto.CompactTextString(m) } +func (*OrderLimit) ProtoMessage() {} +func (*OrderLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{0} +} +func (m *OrderLimit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OrderLimit.Unmarshal(m, b) +} +func (m *OrderLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OrderLimit.Marshal(b, m, deterministic) +} +func (m *OrderLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderLimit.Merge(m, src) +} +func (m *OrderLimit) XXX_Size() int { + return xxx_messageInfo_OrderLimit.Size(m) +} +func (m *OrderLimit) XXX_DiscardUnknown() { + xxx_messageInfo_OrderLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderLimit proto.InternalMessageInfo + +func (m *OrderLimit) GetLimit() int64 { + if m != nil { + return m.Limit + 
} + return 0 +} + +func (m *OrderLimit) GetAction() PieceAction { + if m != nil { + return m.Action + } + return PieceAction_INVALID +} + +func (m *OrderLimit) GetPieceExpiration() time.Time { + if m != nil { + return m.PieceExpiration + } + return time.Time{} +} + +func (m *OrderLimit) GetOrderExpiration() time.Time { + if m != nil { + return m.OrderExpiration + } + return time.Time{} +} + +func (m *OrderLimit) GetOrderCreation() time.Time { + if m != nil { + return m.OrderCreation + } + return time.Time{} +} + +func (m *OrderLimit) GetSatelliteSignature() []byte { + if m != nil { + return m.SatelliteSignature + } + return nil +} + +func (m *OrderLimit) GetSatelliteAddress() *NodeAddress { + if m != nil { + return m.SatelliteAddress + } + return nil +} + +// OrderLimitSigning provides OrderLimit signing serialization +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. 
+type OrderLimitSigning struct { + // unique serial to avoid replay attacks + SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` + // satellite who issued this order limit allowing orderer to do the specified action + SatelliteId NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` + // uplink who requested or whom behalf the order limit to do an action + DeprecatedUplinkId *NodeID `protobuf:"bytes,3,opt,name=deprecated_uplink_id,json=deprecatedUplinkId,proto3,customtype=NodeID" json:"deprecated_uplink_id,omitempty"` + // public key that will be used to sign orders and piece hash + UplinkPublicKey *PiecePublicKey `protobuf:"bytes,13,opt,name=uplink_public_key,json=uplinkPublicKey,proto3,customtype=PiecePublicKey" json:"uplink_public_key,omitempty"` + // storage node who can re claimthe order limit specified by serial + StorageNodeId NodeID `protobuf:"bytes,4,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"` + // piece which is allowed to be touched + PieceId PieceID `protobuf:"bytes,5,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"` + // limit in bytes how much can be changed + Limit int64 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"` + Action PieceAction `protobuf:"varint,7,opt,name=action,proto3,enum=orders.PieceAction" json:"action,omitempty"` + PieceExpiration *time.Time `protobuf:"bytes,8,opt,name=piece_expiration,json=pieceExpiration,proto3,stdtime" json:"piece_expiration,omitempty"` + OrderExpiration *time.Time `protobuf:"bytes,9,opt,name=order_expiration,json=orderExpiration,proto3,stdtime" json:"order_expiration,omitempty"` + OrderCreation *time.Time `protobuf:"bytes,12,opt,name=order_creation,json=orderCreation,proto3,stdtime" json:"order_creation,omitempty"` + SatelliteSignature []byte 
`protobuf:"bytes,10,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"` + // this allows a storage node to find a satellite and handshake with it to get its key. + SatelliteAddress *NodeAddress `protobuf:"bytes,11,opt,name=satellite_address,json=satelliteAddress,proto3" json:"satellite_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OrderLimitSigning) Reset() { *m = OrderLimitSigning{} } +func (m *OrderLimitSigning) String() string { return proto.CompactTextString(m) } +func (*OrderLimitSigning) ProtoMessage() {} +func (*OrderLimitSigning) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{1} +} +func (m *OrderLimitSigning) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OrderLimitSigning.Unmarshal(m, b) +} +func (m *OrderLimitSigning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OrderLimitSigning.Marshal(b, m, deterministic) +} +func (m *OrderLimitSigning) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderLimitSigning.Merge(m, src) +} +func (m *OrderLimitSigning) XXX_Size() int { + return xxx_messageInfo_OrderLimitSigning.Size(m) +} +func (m *OrderLimitSigning) XXX_DiscardUnknown() { + xxx_messageInfo_OrderLimitSigning.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderLimitSigning proto.InternalMessageInfo + +func (m *OrderLimitSigning) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *OrderLimitSigning) GetAction() PieceAction { + if m != nil { + return m.Action + } + return PieceAction_INVALID +} + +func (m *OrderLimitSigning) GetPieceExpiration() *time.Time { + if m != nil { + return m.PieceExpiration + } + return nil +} + +func (m *OrderLimitSigning) GetOrderExpiration() *time.Time { + if m != nil { + return m.OrderExpiration + } + return nil +} + +func (m *OrderLimitSigning) GetOrderCreation() *time.Time 
{ + if m != nil { + return m.OrderCreation + } + return nil +} + +func (m *OrderLimitSigning) GetSatelliteSignature() []byte { + if m != nil { + return m.SatelliteSignature + } + return nil +} + +func (m *OrderLimitSigning) GetSatelliteAddress() *NodeAddress { + if m != nil { + return m.SatelliteAddress + } + return nil +} + +// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber +type Order struct { + // serial of the order limit that was signed + SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` + // amount to be signed for + Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // signature + UplinkSignature []byte `protobuf:"bytes,3,opt,name=uplink_signature,json=uplinkSignature,proto3" json:"uplink_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Order) Reset() { *m = Order{} } +func (m *Order) String() string { return proto.CompactTextString(m) } +func (*Order) ProtoMessage() {} +func (*Order) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{2} +} +func (m *Order) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Order.Unmarshal(m, b) +} +func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Order.Marshal(b, m, deterministic) +} +func (m *Order) XXX_Merge(src proto.Message) { + xxx_messageInfo_Order.Merge(m, src) +} +func (m *Order) XXX_Size() int { + return xxx_messageInfo_Order.Size(m) +} +func (m *Order) XXX_DiscardUnknown() { + xxx_messageInfo_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Order proto.InternalMessageInfo + +func (m *Order) GetAmount() int64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *Order) GetUplinkSignature() []byte { + if m != nil { + return m.UplinkSignature + } + 
return nil +} + +// OrderSigning provides Order signing format +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. +type OrderSigning struct { + // serial of the order limit that was signed + SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` + // amount to be signed for + Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // signature + UplinkSignature []byte `protobuf:"bytes,3,opt,name=uplink_signature,json=uplinkSignature,proto3" json:"uplink_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OrderSigning) Reset() { *m = OrderSigning{} } +func (m *OrderSigning) String() string { return proto.CompactTextString(m) } +func (*OrderSigning) ProtoMessage() {} +func (*OrderSigning) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{3} +} +func (m *OrderSigning) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OrderSigning.Unmarshal(m, b) +} +func (m *OrderSigning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OrderSigning.Marshal(b, m, deterministic) +} +func (m *OrderSigning) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderSigning.Merge(m, src) +} +func (m *OrderSigning) XXX_Size() int { + return xxx_messageInfo_OrderSigning.Size(m) +} +func (m *OrderSigning) XXX_DiscardUnknown() { + xxx_messageInfo_OrderSigning.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderSigning proto.InternalMessageInfo + +func (m *OrderSigning) GetAmount() int64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *OrderSigning) GetUplinkSignature() []byte { + if m 
!= nil { + return m.UplinkSignature + } + return nil +} + +type PieceHash struct { + // piece id + PieceId PieceID `protobuf:"bytes,1,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"` + // hash of the piece that was/is uploaded + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + // size of uploaded piece + PieceSize int64 `protobuf:"varint,4,opt,name=piece_size,json=pieceSize,proto3" json:"piece_size,omitempty"` + // timestamp when upload occurred + Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + // signature either satellite or storage node + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceHash) Reset() { *m = PieceHash{} } +func (m *PieceHash) String() string { return proto.CompactTextString(m) } +func (*PieceHash) ProtoMessage() {} +func (*PieceHash) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{4} +} +func (m *PieceHash) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceHash.Unmarshal(m, b) +} +func (m *PieceHash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceHash.Marshal(b, m, deterministic) +} +func (m *PieceHash) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceHash.Merge(m, src) +} +func (m *PieceHash) XXX_Size() int { + return xxx_messageInfo_PieceHash.Size(m) +} +func (m *PieceHash) XXX_DiscardUnknown() { + xxx_messageInfo_PieceHash.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceHash proto.InternalMessageInfo + +func (m *PieceHash) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *PieceHash) GetPieceSize() int64 { + if m != nil { + return m.PieceSize + } + return 0 +} + +func (m *PieceHash) GetTimestamp() time.Time { + if m != nil { + return m.Timestamp + } + 
return time.Time{} +} + +func (m *PieceHash) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// PieceHashSigning provides piece hash signing format. +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. +type PieceHashSigning struct { + // piece id + PieceId PieceID `protobuf:"bytes,1,opt,name=piece_id,json=pieceId,proto3,customtype=PieceID" json:"piece_id"` + // hash of the piece that was/is uploaded + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + // size of uploaded piece + PieceSize int64 `protobuf:"varint,4,opt,name=piece_size,json=pieceSize,proto3" json:"piece_size,omitempty"` + // timestamp when upload occurred + Timestamp *time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp,omitempty"` + // signature either satellite or storage node + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceHashSigning) Reset() { *m = PieceHashSigning{} } +func (m *PieceHashSigning) String() string { return proto.CompactTextString(m) } +func (*PieceHashSigning) ProtoMessage() {} +func (*PieceHashSigning) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{5} +} +func (m *PieceHashSigning) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceHashSigning.Unmarshal(m, b) +} +func (m *PieceHashSigning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceHashSigning.Marshal(b, m, deterministic) +} +func (m *PieceHashSigning) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceHashSigning.Merge(m, src) +} +func (m 
*PieceHashSigning) XXX_Size() int { + return xxx_messageInfo_PieceHashSigning.Size(m) +} +func (m *PieceHashSigning) XXX_DiscardUnknown() { + xxx_messageInfo_PieceHashSigning.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceHashSigning proto.InternalMessageInfo + +func (m *PieceHashSigning) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *PieceHashSigning) GetPieceSize() int64 { + if m != nil { + return m.PieceSize + } + return 0 +} + +func (m *PieceHashSigning) GetTimestamp() *time.Time { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *PieceHashSigning) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +// Expected order of messages from storagenode: +// go repeated +// SettlementRequest -> (async) +// go repeated +// <- SettlementResponse +type SettlementRequest struct { + Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + Order *Order `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SettlementRequest) Reset() { *m = SettlementRequest{} } +func (m *SettlementRequest) String() string { return proto.CompactTextString(m) } +func (*SettlementRequest) ProtoMessage() {} +func (*SettlementRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{6} +} +func (m *SettlementRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SettlementRequest.Unmarshal(m, b) +} +func (m *SettlementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SettlementRequest.Marshal(b, m, deterministic) +} +func (m *SettlementRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SettlementRequest.Merge(m, src) +} +func (m *SettlementRequest) XXX_Size() int { + return xxx_messageInfo_SettlementRequest.Size(m) +} +func (m *SettlementRequest) 
XXX_DiscardUnknown() { + xxx_messageInfo_SettlementRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SettlementRequest proto.InternalMessageInfo + +func (m *SettlementRequest) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +func (m *SettlementRequest) GetOrder() *Order { + if m != nil { + return m.Order + } + return nil +} + +type SettlementResponse struct { + SerialNumber SerialNumber `protobuf:"bytes,1,opt,name=serial_number,json=serialNumber,proto3,customtype=SerialNumber" json:"serial_number"` + Status SettlementResponse_Status `protobuf:"varint,2,opt,name=status,proto3,enum=orders.SettlementResponse_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SettlementResponse) Reset() { *m = SettlementResponse{} } +func (m *SettlementResponse) String() string { return proto.CompactTextString(m) } +func (*SettlementResponse) ProtoMessage() {} +func (*SettlementResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e0f5d4cf0fc9e41b, []int{7} +} +func (m *SettlementResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SettlementResponse.Unmarshal(m, b) +} +func (m *SettlementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SettlementResponse.Marshal(b, m, deterministic) +} +func (m *SettlementResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SettlementResponse.Merge(m, src) +} +func (m *SettlementResponse) XXX_Size() int { + return xxx_messageInfo_SettlementResponse.Size(m) +} +func (m *SettlementResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SettlementResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SettlementResponse proto.InternalMessageInfo + +func (m *SettlementResponse) GetStatus() SettlementResponse_Status { + if m != nil { + return m.Status + } + return SettlementResponse_INVALID +} + +func init() { + 
proto.RegisterEnum("orders.PieceAction", PieceAction_name, PieceAction_value) + proto.RegisterEnum("orders.SettlementResponse_Status", SettlementResponse_Status_name, SettlementResponse_Status_value) + proto.RegisterType((*OrderLimit)(nil), "orders.OrderLimit") + proto.RegisterType((*OrderLimitSigning)(nil), "orders.OrderLimitSigning") + proto.RegisterType((*Order)(nil), "orders.Order") + proto.RegisterType((*OrderSigning)(nil), "orders.OrderSigning") + proto.RegisterType((*PieceHash)(nil), "orders.PieceHash") + proto.RegisterType((*PieceHashSigning)(nil), "orders.PieceHashSigning") + proto.RegisterType((*SettlementRequest)(nil), "orders.SettlementRequest") + proto.RegisterType((*SettlementResponse)(nil), "orders.SettlementResponse") +} + +func init() { proto.RegisterFile("orders.proto", fileDescriptor_e0f5d4cf0fc9e41b) } + +var fileDescriptor_e0f5d4cf0fc9e41b = []byte{ + // 892 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x4b, 0x6f, 0x23, 0x45, + 0x10, 0x4e, 0xc7, 0xf6, 0x38, 0x2e, 0x3f, 0x32, 0xee, 0x0d, 0x2b, 0x63, 0x81, 0x12, 0xcc, 0xc5, + 0x2c, 0x92, 0xcd, 0x1a, 0x09, 0x69, 0x25, 0x14, 0xc9, 0x8f, 0x21, 0x0c, 0x89, 0xb2, 0x56, 0xdb, + 0x46, 0x88, 0x8b, 0x35, 0xf6, 0x34, 0xce, 0xb0, 0xf6, 0xcc, 0x30, 0xdd, 0x23, 0xb1, 0x7b, 0xe0, + 0x82, 0xb8, 0x71, 0xe0, 0x0f, 0x71, 0xe7, 0x80, 0xc4, 0x9d, 0xc3, 0xf2, 0x3f, 0x38, 0xa1, 0xae, + 0x79, 0x39, 0x90, 0x15, 0x38, 0x1b, 0x24, 0xd8, 0xdb, 0x54, 0x57, 0x7d, 0x55, 0x5d, 0x5d, 0xdf, + 0x57, 0x03, 0x15, 0x2f, 0xb0, 0x79, 0x20, 0x3a, 0x7e, 0xe0, 0x49, 0x8f, 0x6a, 0x91, 0xd5, 0x84, + 0x95, 0xb7, 0xf2, 0xa2, 0xb3, 0xe6, 0xf1, 0xca, 0xf3, 0x56, 0x6b, 0xde, 0x45, 0x6b, 0x11, 0x7e, + 0xd1, 0x95, 0xce, 0x86, 0x0b, 0x69, 0x6d, 0xfc, 0x38, 0x00, 0x5c, 0xcf, 0xe6, 0xd1, 0x77, 0xeb, + 0x5b, 0x0d, 0xe0, 0xb1, 0xca, 0x71, 0xe1, 0x6c, 0x1c, 0x49, 0x1f, 0x41, 0x55, 0xf0, 0xc0, 0xb1, + 0xd6, 0x73, 0x37, 0xdc, 0x2c, 0x78, 0xd0, 0x20, 0x27, 0xa4, 0x5d, 0x19, 0x1c, 0xfd, 
0xf4, 0xfc, + 0x78, 0xef, 0xd7, 0xe7, 0xc7, 0x95, 0x09, 0x3a, 0x2f, 0xd1, 0xc7, 0x2a, 0x62, 0xcb, 0xa2, 0x0f, + 0xa1, 0x22, 0x2c, 0xc9, 0xd7, 0x6b, 0x47, 0xf2, 0xb9, 0x63, 0x37, 0xf6, 0x11, 0x59, 0x8b, 0x91, + 0xda, 0xa5, 0x67, 0x73, 0x73, 0xc4, 0xca, 0x69, 0x8c, 0x69, 0xd3, 0x0f, 0xe1, 0xc8, 0xe6, 0x7e, + 0xc0, 0x97, 0x96, 0xe4, 0xf6, 0x3c, 0xf4, 0xd7, 0x8e, 0xfb, 0x44, 0x41, 0x73, 0x08, 0x85, 0x2d, + 0x18, 0xcd, 0xe2, 0x66, 0x18, 0x66, 0xda, 0x74, 0x00, 0xf5, 0x18, 0xe2, 0x87, 0x8b, 0xb5, 0xb3, + 0x9c, 0x3f, 0xe1, 0x4f, 0x1b, 0x55, 0x84, 0xde, 0x8f, 0xab, 0xd6, 0xc6, 0x0e, 0x5f, 0xf2, 0x31, + 0xba, 0xcf, 0xf9, 0x53, 0x76, 0x18, 0x01, 0xd2, 0x03, 0xfa, 0x01, 0x1c, 0x0a, 0xe9, 0x05, 0xd6, + 0x8a, 0xcf, 0xd5, 0xa3, 0xa8, 0xe2, 0xf9, 0x1b, 0xef, 0x5d, 0x8d, 0xc3, 0xd0, 0xb4, 0xe9, 0x03, + 0x38, 0xf0, 0x55, 0x6a, 0x05, 0x28, 0x20, 0xe0, 0x30, 0x06, 0x14, 0xb1, 0xa4, 0x39, 0x62, 0x45, + 0x0c, 0x30, 0x6d, 0x7a, 0x04, 0x85, 0xb5, 0x7a, 0xdc, 0x86, 0x76, 0x42, 0xda, 0x39, 0x16, 0x19, + 0xf4, 0x5d, 0xd0, 0xac, 0xa5, 0x74, 0x3c, 0xb7, 0x51, 0x3c, 0x21, 0xed, 0x5a, 0xef, 0x5e, 0x27, + 0x1e, 0x2c, 0xe2, 0xfb, 0xe8, 0x62, 0x71, 0x08, 0x7d, 0x0c, 0x7a, 0x54, 0x8e, 0x7f, 0xed, 0x3b, + 0x81, 0x85, 0xb0, 0x83, 0x13, 0xd2, 0x2e, 0xf7, 0x9a, 0x9d, 0x68, 0xda, 0x9d, 0x64, 0xda, 0x9d, + 0x69, 0x32, 0xed, 0xc1, 0x81, 0xba, 0xd2, 0x0f, 0xbf, 0x1d, 0x13, 0x76, 0x88, 0x68, 0x23, 0x05, + 0xab, 0x84, 0x58, 0x6e, 0x3b, 0x61, 0x69, 0x97, 0x84, 0x88, 0xde, 0x4a, 0x78, 0x0e, 0xb5, 0x28, + 0xe1, 0x32, 0xe0, 0x51, 0xba, 0xca, 0x0e, 0xe9, 0xaa, 0x88, 0x1d, 0xc6, 0x50, 0xda, 0x85, 0x7b, + 0x19, 0x95, 0x84, 0xb3, 0x72, 0x2d, 0x19, 0x06, 0xbc, 0x01, 0xea, 0xa1, 0x19, 0x4d, 0x5d, 0x93, + 0xc4, 0x43, 0x4f, 0xa1, 0x9e, 0x01, 0x2c, 0xdb, 0x0e, 0xb8, 0x10, 0x8d, 0x32, 0x5e, 0xa0, 0xde, + 0x41, 0xb6, 0xab, 0xb9, 0xf5, 0x23, 0x07, 0xd3, 0xd3, 0xd8, 0xf8, 0xa4, 0xf5, 0x7b, 0x01, 0xea, + 0x99, 0x0a, 0x54, 0x5e, 0xc7, 0x5d, 0xfd, 0xaf, 0xc4, 0x70, 0xfa, 0x62, 0x31, 0xd0, 0x57, 0x48, + 0x08, 0xe7, 0xb7, 0x12, 
0x42, 0xfe, 0x66, 0x11, 0x9c, 0xdf, 0x4a, 0x04, 0xf9, 0x9b, 0x05, 0x70, + 0x76, 0x0b, 0x01, 0xe4, 0xff, 0x13, 0xe4, 0xff, 0x8e, 0x40, 0x01, 0xc9, 0xff, 0x32, 0x84, 0xbf, + 0x0f, 0x9a, 0xb5, 0xf1, 0x42, 0x57, 0x22, 0xd5, 0x73, 0x2c, 0xb6, 0xe8, 0x3b, 0xa0, 0xc7, 0xbc, + 0xcc, 0x5a, 0x41, 0x46, 0x27, 0x14, 0x4c, 0xfb, 0x68, 0x7d, 0x4f, 0xa0, 0x82, 0xf7, 0xb8, 0x03, + 0xfd, 0xdd, 0xc1, 0x75, 0x7e, 0x26, 0x50, 0x42, 0x0a, 0x7e, 0x6c, 0x89, 0xab, 0x6b, 0x3c, 0x27, + 0x7f, 0xc3, 0x73, 0x0a, 0xf9, 0x2b, 0x4b, 0x5c, 0x45, 0xa2, 0x67, 0xf8, 0x4d, 0xdf, 0x04, 0x88, + 0xf0, 0xc2, 0x79, 0xc6, 0x51, 0x5a, 0x39, 0x56, 0xc2, 0x93, 0x89, 0xf3, 0x8c, 0xd3, 0x01, 0x94, + 0xd2, 0xbf, 0x34, 0xea, 0xe8, 0x9f, 0x6e, 0xce, 0x0c, 0x46, 0xdf, 0x80, 0xd2, 0x9f, 0x9b, 0xca, + 0x0e, 0x5a, 0xbf, 0x10, 0xd0, 0xd3, 0x76, 0x92, 0x17, 0xfe, 0x97, 0xbb, 0x3a, 0xdd, 0xad, 0xab, + 0xfc, 0x6e, 0x1d, 0x2d, 0xa0, 0x3e, 0xe1, 0x52, 0xae, 0xf9, 0x86, 0xbb, 0x92, 0xf1, 0xaf, 0x42, + 0x2e, 0x24, 0x6d, 0x27, 0x3b, 0x86, 0x60, 0x39, 0x9a, 0x2c, 0x93, 0x6c, 0xbb, 0x27, 0x7b, 0xe7, + 0x6d, 0x28, 0xa0, 0x0f, 0x1b, 0x2a, 0xf7, 0xaa, 0xd7, 0x22, 0x59, 0xe4, 0x6b, 0xfd, 0x48, 0x80, + 0x6e, 0x17, 0x11, 0xbe, 0xe7, 0x0a, 0xfe, 0x32, 0xcc, 0x7c, 0x04, 0x9a, 0x90, 0x96, 0x0c, 0x05, + 0xd6, 0xad, 0xf5, 0xde, 0x4a, 0xea, 0xfe, 0xb5, 0x4c, 0x67, 0x82, 0x81, 0x2c, 0x06, 0xb4, 0x1e, + 0x82, 0x16, 0x9d, 0xd0, 0x32, 0x14, 0xcd, 0xcb, 0x4f, 0xfb, 0x17, 0xe6, 0x48, 0xdf, 0xa3, 0x15, + 0x38, 0xe8, 0x0f, 0x87, 0xc6, 0x78, 0x6a, 0x8c, 0x74, 0xa2, 0x2c, 0x66, 0x7c, 0x62, 0x0c, 0x95, + 0xb5, 0xff, 0xe0, 0x1b, 0x28, 0x6f, 0xad, 0xd1, 0xeb, 0xb8, 0x22, 0xe4, 0xc6, 0xb3, 0xa9, 0x4e, + 0xd4, 0xc7, 0x99, 0x31, 0xd5, 0xf7, 0x69, 0x15, 0x4a, 0x67, 0xc6, 0x74, 0xde, 0x9f, 0x8d, 0xcc, + 0xa9, 0x9e, 0xa3, 0x35, 0x00, 0x65, 0x32, 0x63, 0xdc, 0x37, 0x99, 0x9e, 0x57, 0xf6, 0x78, 0x96, + 0xda, 0x05, 0x0a, 0xa0, 0x8d, 0x8c, 0x0b, 0x63, 0x6a, 0xe8, 0x1a, 0x7d, 0x0d, 0xea, 0xca, 0x77, + 0xc6, 0xfa, 0x43, 0xe3, 0xa3, 0xd9, 0xc5, 0xdc, 0xf8, 0xcc, 
0x9c, 0xea, 0xc5, 0xde, 0x04, 0x34, + 0x7c, 0x4f, 0x41, 0x4d, 0x80, 0xac, 0x43, 0xfa, 0xfa, 0x4d, 0x5d, 0xe3, 0x04, 0x9b, 0xcd, 0x17, + 0x3f, 0x48, 0x6b, 0xaf, 0x4d, 0xde, 0x23, 0x83, 0xa3, 0xcf, 0xa9, 0xfa, 0x09, 0x7d, 0xd9, 0x71, + 0xbc, 0xee, 0xd2, 0xdb, 0x6c, 0x3c, 0xb7, 0xeb, 0x2f, 0x16, 0x1a, 0x32, 0xea, 0xfd, 0x3f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xb4, 0xbc, 0x85, 0xa1, 0x21, 0x0b, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCOrdersClient interface { + DRPCConn() drpc.Conn + + Settlement(ctx context.Context) (DRPCOrders_SettlementClient, error) +} + +type drpcOrdersClient struct { + cc drpc.Conn +} + +func NewDRPCOrdersClient(cc drpc.Conn) DRPCOrdersClient { + return &drpcOrdersClient{cc} +} + +func (c *drpcOrdersClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcOrdersClient) Settlement(ctx context.Context) (DRPCOrders_SettlementClient, error) { + stream, err := c.cc.NewStream(ctx, "/orders.Orders/Settlement") + if err != nil { + return nil, err + } + x := &drpcOrdersSettlementClient{stream} + return x, nil +} + +type DRPCOrders_SettlementClient interface { + drpc.Stream + Send(*SettlementRequest) error + Recv() (*SettlementResponse, error) +} + +type drpcOrdersSettlementClient struct { + drpc.Stream +} + +func (x *drpcOrdersSettlementClient) Send(m *SettlementRequest) error { + return x.MsgSend(m) +} + +func (x *drpcOrdersSettlementClient) Recv() (*SettlementResponse, error) { + m := new(SettlementResponse) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +type DRPCOrdersServer interface { + Settlement(DRPCOrders_SettlementStream) error +} + +type DRPCOrdersDescription struct{} + +func (DRPCOrdersDescription) NumMethods() int { return 1 } + +func (DRPCOrdersDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/orders.Orders/Settlement", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return nil, 
srv.(DRPCOrdersServer). + Settlement( + &drpcOrdersSettlementStream{in1.(drpc.Stream)}, + ) + }, DRPCOrdersServer.Settlement, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterOrders(mux drpc.Mux, impl DRPCOrdersServer) error { + return mux.Register(impl, DRPCOrdersDescription{}) +} + +type DRPCOrders_SettlementStream interface { + drpc.Stream + Send(*SettlementResponse) error + Recv() (*SettlementRequest, error) +} + +type drpcOrdersSettlementStream struct { + drpc.Stream +} + +func (x *drpcOrdersSettlementStream) Send(m *SettlementResponse) error { + return x.MsgSend(m) +} + +func (x *drpcOrdersSettlementStream) Recv() (*SettlementRequest, error) { + m := new(SettlementRequest) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/orders.proto b/vendor/storj.io/common/pb/orders.proto new file mode 100644 index 000000000..c05cc4c00 --- /dev/null +++ b/vendor/storj.io/common/pb/orders.proto @@ -0,0 +1,168 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package orders; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "node.proto"; + +// PieceAction is an enumeration of all possible executed actions on storage node +enum PieceAction { + INVALID = 0; + PUT = 1; + GET = 2; + GET_AUDIT = 3; + GET_REPAIR = 4; + PUT_REPAIR = 5; + DELETE = 6; + PUT_GRACEFUL_EXIT = 7; +} + +// OrderLimit is provided by satellite to execute specific action on storage node within some limits +message OrderLimit { + // unique serial to avoid replay attacks + bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; + // satellite who issued this order limit allowing orderer to do the specified action + bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // uplink who requested or whom behalf the order limit to do an action + bytes deprecated_uplink_id = 3 [(gogoproto.customtype) = "NodeID"]; + // public key that will be used to sign orders and piece hash + bytes uplink_public_key = 13 [(gogoproto.customtype) = "PiecePublicKey", (gogoproto.nullable) = false]; + // storage node who can re claimthe order limit specified by serial + bytes storage_node_id = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + + // piece which is allowed to be touched + bytes piece_id = 5 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + // limit in bytes how much can be changed + int64 limit = 6; + PieceAction action = 7; + + google.protobuf.Timestamp piece_expiration = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp order_expiration = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp order_creation = 12 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes satellite_signature = 10; + // satellites aren't necessarily discoverable in kademlia. 
this allows + // a storage node to find a satellite and handshake with it to get its key. + node.NodeAddress satellite_address = 11; +} + +// OrderLimitSigning provides OrderLimit signing serialization +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. +message OrderLimitSigning { + // unique serial to avoid replay attacks + bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; + // satellite who issued this order limit allowing orderer to do the specified action + bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + // uplink who requested or whom behalf the order limit to do an action + bytes deprecated_uplink_id = 3 [(gogoproto.customtype) = "NodeID"]; + // public key that will be used to sign orders and piece hash + bytes uplink_public_key = 13 [(gogoproto.customtype) = "PiecePublicKey"]; + // storage node who can re claimthe order limit specified by serial + bytes storage_node_id = 4 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + + // piece which is allowed to be touched + bytes piece_id = 5 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + // limit in bytes how much can be changed + int64 limit = 6; + PieceAction action = 7; + + google.protobuf.Timestamp piece_expiration = 8 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp order_expiration = 9 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp order_creation = 12 [(gogoproto.stdtime) = true]; + + bytes satellite_signature = 10; + + // this allows a storage node to find a satellite and handshake with it to get its key. 
+ node.NodeAddress satellite_address = 11; +} + +// Order is a one step of fullfilling Amount number of bytes from an OrderLimit with SerialNumber +message Order { + // serial of the order limit that was signed + bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; + // amount to be signed for + int64 amount = 2; + // signature + bytes uplink_signature = 3; +} + +// OrderSigning provides Order signing format +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. +message OrderSigning { + // serial of the order limit that was signed + bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; + // amount to be signed for + int64 amount = 2; + // signature + bytes uplink_signature = 3; +} + +message PieceHash { + // piece id + bytes piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + // hash of the piece that was/is uploaded + bytes hash = 2; + // size of uploaded piece + int64 piece_size = 4; + // timestamp when upload occurred + google.protobuf.Timestamp timestamp = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + // signature either satellite or storage node + bytes signature = 3; +} + +// PieceHashSigning provides piece hash signing format. +// +// It is never used for sending across the network, it is +// used in signing to ensure that nullable=false fields get handled properly. +// Its purpose is to solidify the format of how we serialize for +// signing, to handle some backwards compatibility considerations. 
+message PieceHashSigning { + // piece id + bytes piece_id = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + // hash of the piece that was/is uploaded + bytes hash = 2; + // size of uploaded piece + int64 piece_size = 4; + // timestamp when upload occurred + google.protobuf.Timestamp timestamp = 5 [(gogoproto.stdtime) = true]; + // signature either satellite or storage node + bytes signature = 3; +} + +service Orders { + rpc Settlement(stream SettlementRequest) returns (stream SettlementResponse) {} +} + +// Expected order of messages from storagenode: +// go repeated +// SettlementRequest -> (async) +// go repeated +// <- SettlementResponse +message SettlementRequest { + OrderLimit limit = 1; + Order order = 2; +} + +message SettlementResponse { + enum Status { + INVALID = 0; + ACCEPTED = 1; + REJECTED = 2; + } + + bytes serial_number = 1 [(gogoproto.customtype) = "SerialNumber", (gogoproto.nullable) = false]; + Status status = 2; +} diff --git a/vendor/storj.io/common/pb/overlay.pb.go b/vendor/storj.io/common/pb/overlay.pb.go new file mode 100644 index 000000000..d13a7ead9 --- /dev/null +++ b/vendor/storj.io/common/pb/overlay.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: overlay.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Restriction_Operator int32 + +const ( + Restriction_LT Restriction_Operator = 0 + Restriction_EQ Restriction_Operator = 1 + Restriction_GT Restriction_Operator = 2 + Restriction_LTE Restriction_Operator = 3 + Restriction_GTE Restriction_Operator = 4 +) + +var Restriction_Operator_name = map[int32]string{ + 0: "LT", + 1: "EQ", + 2: "GT", + 3: "LTE", + 4: "GTE", +} + +var Restriction_Operator_value = map[string]int32{ + "LT": 0, + "EQ": 1, + "GT": 2, + "LTE": 3, + "GTE": 4, +} + +func (x Restriction_Operator) String() string { + return proto.EnumName(Restriction_Operator_name, int32(x)) +} + +func (Restriction_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_61fc82527fbe24ad, []int{1, 0} +} + +type Restriction_Operand int32 + +const ( + Restriction_FREE_BANDWIDTH Restriction_Operand = 0 + Restriction_FREE_DISK Restriction_Operand = 1 +) + +var Restriction_Operand_name = map[int32]string{ + 0: "FREE_BANDWIDTH", + 1: "FREE_DISK", +} + +var Restriction_Operand_value = map[string]int32{ + "FREE_BANDWIDTH": 0, + "FREE_DISK": 1, +} + +func (x Restriction_Operand) String() string { + return proto.EnumName(Restriction_Operand_name, int32(x)) +} + +func (Restriction_Operand) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_61fc82527fbe24ad, []int{1, 1} +} + +type InfoResponse struct { + Type NodeType `protobuf:"varint,2,opt,name=type,proto3,enum=node.NodeType" json:"type,omitempty"` + Operator *NodeOperator `protobuf:"bytes,3,opt,name=operator,proto3" json:"operator,omitempty"` + Capacity *NodeCapacity `protobuf:"bytes,4,opt,name=capacity,proto3" json:"capacity,omitempty"` + Version *NodeVersion `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) 
String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_61fc82527fbe24ad, []int{0} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InfoResponse.Unmarshal(m, b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return xxx_messageInfo_InfoResponse.Size(m) +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + +func (m *InfoResponse) GetType() NodeType { + if m != nil { + return m.Type + } + return NodeType_INVALID +} + +func (m *InfoResponse) GetOperator() *NodeOperator { + if m != nil { + return m.Operator + } + return nil +} + +func (m *InfoResponse) GetCapacity() *NodeCapacity { + if m != nil { + return m.Capacity + } + return nil +} + +func (m *InfoResponse) GetVersion() *NodeVersion { + if m != nil { + return m.Version + } + return nil +} + +type Restriction struct { + Operator Restriction_Operator `protobuf:"varint,1,opt,name=operator,proto3,enum=overlay.Restriction_Operator" json:"operator,omitempty"` + Operand Restriction_Operand `protobuf:"varint,2,opt,name=operand,proto3,enum=overlay.Restriction_Operand" json:"operand,omitempty"` + Value int64 `protobuf:"varint,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Restriction) Reset() { *m = Restriction{} } +func (m *Restriction) String() string { return proto.CompactTextString(m) } +func (*Restriction) ProtoMessage() {} +func (*Restriction) Descriptor() ([]byte, []int) { + return 
fileDescriptor_61fc82527fbe24ad, []int{1} +} +func (m *Restriction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Restriction.Unmarshal(m, b) +} +func (m *Restriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Restriction.Marshal(b, m, deterministic) +} +func (m *Restriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Restriction.Merge(m, src) +} +func (m *Restriction) XXX_Size() int { + return xxx_messageInfo_Restriction.Size(m) +} +func (m *Restriction) XXX_DiscardUnknown() { + xxx_messageInfo_Restriction.DiscardUnknown(m) +} + +var xxx_messageInfo_Restriction proto.InternalMessageInfo + +func (m *Restriction) GetOperator() Restriction_Operator { + if m != nil { + return m.Operator + } + return Restriction_LT +} + +func (m *Restriction) GetOperand() Restriction_Operand { + if m != nil { + return m.Operand + } + return Restriction_FREE_BANDWIDTH +} + +func (m *Restriction) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("overlay.Restriction_Operator", Restriction_Operator_name, Restriction_Operator_value) + proto.RegisterEnum("overlay.Restriction_Operand", Restriction_Operand_name, Restriction_Operand_value) + proto.RegisterType((*InfoResponse)(nil), "overlay.InfoResponse") + proto.RegisterType((*Restriction)(nil), "overlay.Restriction") +} + +func init() { proto.RegisterFile("overlay.proto", fileDescriptor_61fc82527fbe24ad) } + +var fileDescriptor_61fc82527fbe24ad = []byte{ + // 330 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x5f, 0x4b, 0xf3, 0x30, + 0x14, 0xc6, 0xd7, 0x75, 0x5b, 0xf7, 0x9e, 0xbd, 0x1b, 0x35, 0xec, 0xa2, 0x88, 0xc2, 0xe8, 0xd5, + 0x40, 0xe9, 0x60, 0x8a, 0xe0, 0xa5, 0x73, 0x75, 0x0e, 0xc7, 0xc4, 0x58, 0x14, 0xbc, 0x91, 0xae, + 0x8d, 0x50, 0xd9, 0x72, 0x42, 0x1a, 0x07, 0xfd, 0x74, 0x7e, 0x2f, 0xaf, 0xa4, 0x59, 0xbb, 0x0d, + 0xd1, 0xab, 0xf3, 0xe7, 0xf9, 0x25, 
0x39, 0xe7, 0x09, 0xb4, 0x71, 0xcd, 0xe4, 0x32, 0xcc, 0x3c, + 0x21, 0x51, 0x21, 0xb1, 0x8a, 0xf2, 0x10, 0x38, 0xc6, 0x6c, 0xd3, 0x74, 0x3f, 0x0d, 0xf8, 0x3f, + 0xe5, 0x6f, 0x48, 0x59, 0x2a, 0x90, 0xa7, 0x8c, 0xb8, 0x50, 0x53, 0x99, 0x60, 0x4e, 0xb5, 0x67, + 0xf4, 0x3b, 0xc3, 0x8e, 0xa7, 0xd9, 0x39, 0xc6, 0x2c, 0xc8, 0x04, 0xa3, 0x5a, 0x23, 0x1e, 0x34, + 0x51, 0x30, 0x19, 0x2a, 0x94, 0x8e, 0xd9, 0x33, 0xfa, 0xad, 0x21, 0xd9, 0x71, 0xf7, 0x85, 0x42, + 0xb7, 0x4c, 0xce, 0x47, 0xa1, 0x08, 0xa3, 0x44, 0x65, 0x4e, 0xed, 0x27, 0x7f, 0x5d, 0x28, 0x74, + 0xcb, 0x90, 0x13, 0xb0, 0xd6, 0x4c, 0xa6, 0x09, 0x72, 0xa7, 0xae, 0xf1, 0x83, 0x1d, 0xfe, 0xb4, + 0x11, 0x68, 0x49, 0xb8, 0x5f, 0x06, 0xb4, 0x28, 0x4b, 0x95, 0x4c, 0x22, 0x95, 0x20, 0x27, 0x97, + 0x7b, 0xc3, 0x19, 0x7a, 0x89, 0x63, 0xaf, 0x34, 0x62, 0x8f, 0xf3, 0x7e, 0x99, 0xf3, 0x02, 0x2c, + 0x9d, 0xf3, 0xb8, 0x58, 0xff, 0xe8, 0xef, 0x93, 0x3c, 0xa6, 0x25, 0x4c, 0xba, 0x50, 0x5f, 0x87, + 0xcb, 0x0f, 0xa6, 0xcd, 0x30, 0xe9, 0xa6, 0x70, 0xcf, 0xa1, 0x59, 0xbe, 0x41, 0x1a, 0x50, 0x9d, + 0x05, 0x76, 0x25, 0x8f, 0xfe, 0x83, 0x6d, 0xe4, 0x71, 0x12, 0xd8, 0x55, 0x62, 0x81, 0x39, 0x0b, + 0x7c, 0xdb, 0xcc, 0x93, 0x49, 0xe0, 0xdb, 0x35, 0xf7, 0x14, 0xac, 0xe2, 0x7e, 0x42, 0xa0, 0x73, + 0x43, 0x7d, 0xff, 0x75, 0x74, 0x35, 0x1f, 0x3f, 0x4f, 0xc7, 0xc1, 0xad, 0x5d, 0x21, 0x6d, 0xf8, + 0xa7, 0x7b, 0xe3, 0xe9, 0xe3, 0x9d, 0x6d, 0x8c, 0xba, 0x2f, 0x24, 0x55, 0x28, 0xdf, 0xbd, 0x04, + 0x07, 0x11, 0xae, 0x56, 0xc8, 0x07, 0x62, 0xb1, 0x68, 0xe8, 0xbf, 0x3d, 0xfb, 0x0e, 0x00, 0x00, + 0xff, 0xff, 0x90, 0x70, 0xdc, 0xec, 0x01, 0x02, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/overlay.proto b/vendor/storj.io/common/pb/overlay.proto new file mode 100644 index 000000000..d7c44ced7 --- /dev/null +++ b/vendor/storj.io/common/pb/overlay.proto @@ -0,0 +1,34 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +import "node.proto"; + +package overlay; + +message InfoResponse { + node.NodeType type = 2; + node.NodeOperator operator = 3; + node.NodeCapacity capacity = 4; + node.NodeVersion version = 5; +} + +message Restriction { + enum Operator { + LT = 0; + EQ = 1; + GT = 2; + LTE = 3; + GTE = 4; + } + enum Operand { + FREE_BANDWIDTH = 0; + FREE_DISK = 1; + } + + Operator operator = 1; + Operand operand = 2; + int64 value = 3; +} diff --git a/vendor/storj.io/common/pb/payments.pb.go b/vendor/storj.io/common/pb/payments.pb.go new file mode 100644 index 000000000..a6a7e539b --- /dev/null +++ b/vendor/storj.io/common/pb/payments.pb.go @@ -0,0 +1,593 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: payments.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PrepareInvoiceRecordsRequest struct { + Period time.Time `protobuf:"bytes,1,opt,name=period,proto3,stdtime" json:"period"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareInvoiceRecordsRequest) Reset() { *m = PrepareInvoiceRecordsRequest{} } +func (m *PrepareInvoiceRecordsRequest) String() string { return proto.CompactTextString(m) } +func (*PrepareInvoiceRecordsRequest) ProtoMessage() {} +func (*PrepareInvoiceRecordsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{0} +} +func (m *PrepareInvoiceRecordsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareInvoiceRecordsRequest.Unmarshal(m, b) +} +func (m *PrepareInvoiceRecordsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareInvoiceRecordsRequest.Marshal(b, m, deterministic) +} +func (m *PrepareInvoiceRecordsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareInvoiceRecordsRequest.Merge(m, src) +} +func (m *PrepareInvoiceRecordsRequest) XXX_Size() int { + return xxx_messageInfo_PrepareInvoiceRecordsRequest.Size(m) +} +func (m *PrepareInvoiceRecordsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareInvoiceRecordsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareInvoiceRecordsRequest proto.InternalMessageInfo + +func (m *PrepareInvoiceRecordsRequest) GetPeriod() time.Time { + if m != nil { + return m.Period + } + return time.Time{} +} + +type PrepareInvoiceRecordsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareInvoiceRecordsResponse) Reset() { *m = PrepareInvoiceRecordsResponse{} } +func (m *PrepareInvoiceRecordsResponse) String() string { return proto.CompactTextString(m) } +func 
(*PrepareInvoiceRecordsResponse) ProtoMessage() {} +func (*PrepareInvoiceRecordsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{1} +} +func (m *PrepareInvoiceRecordsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareInvoiceRecordsResponse.Unmarshal(m, b) +} +func (m *PrepareInvoiceRecordsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareInvoiceRecordsResponse.Marshal(b, m, deterministic) +} +func (m *PrepareInvoiceRecordsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareInvoiceRecordsResponse.Merge(m, src) +} +func (m *PrepareInvoiceRecordsResponse) XXX_Size() int { + return xxx_messageInfo_PrepareInvoiceRecordsResponse.Size(m) +} +func (m *PrepareInvoiceRecordsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareInvoiceRecordsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareInvoiceRecordsResponse proto.InternalMessageInfo + +type ApplyInvoiceRecordsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceRecordsRequest) Reset() { *m = ApplyInvoiceRecordsRequest{} } +func (m *ApplyInvoiceRecordsRequest) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceRecordsRequest) ProtoMessage() {} +func (*ApplyInvoiceRecordsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{2} +} +func (m *ApplyInvoiceRecordsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceRecordsRequest.Unmarshal(m, b) +} +func (m *ApplyInvoiceRecordsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceRecordsRequest.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceRecordsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceRecordsRequest.Merge(m, src) +} +func (m *ApplyInvoiceRecordsRequest) XXX_Size() 
int { + return xxx_messageInfo_ApplyInvoiceRecordsRequest.Size(m) +} +func (m *ApplyInvoiceRecordsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceRecordsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceRecordsRequest proto.InternalMessageInfo + +type ApplyInvoiceRecordsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceRecordsResponse) Reset() { *m = ApplyInvoiceRecordsResponse{} } +func (m *ApplyInvoiceRecordsResponse) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceRecordsResponse) ProtoMessage() {} +func (*ApplyInvoiceRecordsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{3} +} +func (m *ApplyInvoiceRecordsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceRecordsResponse.Unmarshal(m, b) +} +func (m *ApplyInvoiceRecordsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceRecordsResponse.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceRecordsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceRecordsResponse.Merge(m, src) +} +func (m *ApplyInvoiceRecordsResponse) XXX_Size() int { + return xxx_messageInfo_ApplyInvoiceRecordsResponse.Size(m) +} +func (m *ApplyInvoiceRecordsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceRecordsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceRecordsResponse proto.InternalMessageInfo + +type ApplyInvoiceCouponsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceCouponsRequest) Reset() { *m = ApplyInvoiceCouponsRequest{} } +func (m *ApplyInvoiceCouponsRequest) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceCouponsRequest) ProtoMessage() {} +func 
(*ApplyInvoiceCouponsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{4} +} +func (m *ApplyInvoiceCouponsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceCouponsRequest.Unmarshal(m, b) +} +func (m *ApplyInvoiceCouponsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceCouponsRequest.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceCouponsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceCouponsRequest.Merge(m, src) +} +func (m *ApplyInvoiceCouponsRequest) XXX_Size() int { + return xxx_messageInfo_ApplyInvoiceCouponsRequest.Size(m) +} +func (m *ApplyInvoiceCouponsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceCouponsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceCouponsRequest proto.InternalMessageInfo + +type ApplyInvoiceCouponsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceCouponsResponse) Reset() { *m = ApplyInvoiceCouponsResponse{} } +func (m *ApplyInvoiceCouponsResponse) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceCouponsResponse) ProtoMessage() {} +func (*ApplyInvoiceCouponsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{5} +} +func (m *ApplyInvoiceCouponsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceCouponsResponse.Unmarshal(m, b) +} +func (m *ApplyInvoiceCouponsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceCouponsResponse.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceCouponsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceCouponsResponse.Merge(m, src) +} +func (m *ApplyInvoiceCouponsResponse) XXX_Size() int { + return xxx_messageInfo_ApplyInvoiceCouponsResponse.Size(m) +} +func (m 
*ApplyInvoiceCouponsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceCouponsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceCouponsResponse proto.InternalMessageInfo + +type ApplyInvoiceCreditsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceCreditsRequest) Reset() { *m = ApplyInvoiceCreditsRequest{} } +func (m *ApplyInvoiceCreditsRequest) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceCreditsRequest) ProtoMessage() {} +func (*ApplyInvoiceCreditsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{6} +} +func (m *ApplyInvoiceCreditsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceCreditsRequest.Unmarshal(m, b) +} +func (m *ApplyInvoiceCreditsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceCreditsRequest.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceCreditsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceCreditsRequest.Merge(m, src) +} +func (m *ApplyInvoiceCreditsRequest) XXX_Size() int { + return xxx_messageInfo_ApplyInvoiceCreditsRequest.Size(m) +} +func (m *ApplyInvoiceCreditsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceCreditsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceCreditsRequest proto.InternalMessageInfo + +type ApplyInvoiceCreditsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyInvoiceCreditsResponse) Reset() { *m = ApplyInvoiceCreditsResponse{} } +func (m *ApplyInvoiceCreditsResponse) String() string { return proto.CompactTextString(m) } +func (*ApplyInvoiceCreditsResponse) ProtoMessage() {} +func (*ApplyInvoiceCreditsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, 
[]int{7} +} +func (m *ApplyInvoiceCreditsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyInvoiceCreditsResponse.Unmarshal(m, b) +} +func (m *ApplyInvoiceCreditsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyInvoiceCreditsResponse.Marshal(b, m, deterministic) +} +func (m *ApplyInvoiceCreditsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyInvoiceCreditsResponse.Merge(m, src) +} +func (m *ApplyInvoiceCreditsResponse) XXX_Size() int { + return xxx_messageInfo_ApplyInvoiceCreditsResponse.Size(m) +} +func (m *ApplyInvoiceCreditsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyInvoiceCreditsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyInvoiceCreditsResponse proto.InternalMessageInfo + +type CreateInvoicesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateInvoicesRequest) Reset() { *m = CreateInvoicesRequest{} } +func (m *CreateInvoicesRequest) String() string { return proto.CompactTextString(m) } +func (*CreateInvoicesRequest) ProtoMessage() {} +func (*CreateInvoicesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{8} +} +func (m *CreateInvoicesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateInvoicesRequest.Unmarshal(m, b) +} +func (m *CreateInvoicesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateInvoicesRequest.Marshal(b, m, deterministic) +} +func (m *CreateInvoicesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateInvoicesRequest.Merge(m, src) +} +func (m *CreateInvoicesRequest) XXX_Size() int { + return xxx_messageInfo_CreateInvoicesRequest.Size(m) +} +func (m *CreateInvoicesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateInvoicesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateInvoicesRequest 
proto.InternalMessageInfo + +type CreateInvoicesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateInvoicesResponse) Reset() { *m = CreateInvoicesResponse{} } +func (m *CreateInvoicesResponse) String() string { return proto.CompactTextString(m) } +func (*CreateInvoicesResponse) ProtoMessage() {} +func (*CreateInvoicesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a9566e6e864d2854, []int{9} +} +func (m *CreateInvoicesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateInvoicesResponse.Unmarshal(m, b) +} +func (m *CreateInvoicesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateInvoicesResponse.Marshal(b, m, deterministic) +} +func (m *CreateInvoicesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateInvoicesResponse.Merge(m, src) +} +func (m *CreateInvoicesResponse) XXX_Size() int { + return xxx_messageInfo_CreateInvoicesResponse.Size(m) +} +func (m *CreateInvoicesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateInvoicesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateInvoicesResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PrepareInvoiceRecordsRequest)(nil), "nodestats.PrepareInvoiceRecordsRequest") + proto.RegisterType((*PrepareInvoiceRecordsResponse)(nil), "nodestats.PrepareInvoiceRecordsResponse") + proto.RegisterType((*ApplyInvoiceRecordsRequest)(nil), "nodestats.ApplyInvoiceRecordsRequest") + proto.RegisterType((*ApplyInvoiceRecordsResponse)(nil), "nodestats.ApplyInvoiceRecordsResponse") + proto.RegisterType((*ApplyInvoiceCouponsRequest)(nil), "nodestats.ApplyInvoiceCouponsRequest") + proto.RegisterType((*ApplyInvoiceCouponsResponse)(nil), "nodestats.ApplyInvoiceCouponsResponse") + proto.RegisterType((*ApplyInvoiceCreditsRequest)(nil), "nodestats.ApplyInvoiceCreditsRequest") + 
proto.RegisterType((*ApplyInvoiceCreditsResponse)(nil), "nodestats.ApplyInvoiceCreditsResponse") + proto.RegisterType((*CreateInvoicesRequest)(nil), "nodestats.CreateInvoicesRequest") + proto.RegisterType((*CreateInvoicesResponse)(nil), "nodestats.CreateInvoicesResponse") +} + +func init() { proto.RegisterFile("payments.proto", fileDescriptor_a9566e6e864d2854) } + +var fileDescriptor_a9566e6e864d2854 = []byte{ + // 341 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xcf, 0x4e, 0xfa, 0x40, + 0x10, 0xc7, 0x7f, 0xe4, 0x97, 0x10, 0x5c, 0x13, 0x0e, 0xab, 0x28, 0x59, 0x21, 0xc5, 0x26, 0x2a, + 0xa7, 0x6d, 0x82, 0x57, 0x2f, 0xc2, 0xc9, 0x1b, 0x21, 0x7a, 0x31, 0x5e, 0x0a, 0x1d, 0x9b, 0x12, + 0xda, 0x59, 0x77, 0x17, 0x13, 0xde, 0xc2, 0xc7, 0xf2, 0x29, 0xf4, 0x51, 0x34, 0xb0, 0x6d, 0x43, + 0x71, 0x17, 0x8e, 0x9d, 0xf9, 0xfe, 0x49, 0xe7, 0x93, 0x25, 0x4d, 0x11, 0xae, 0x52, 0xc8, 0xb4, + 0xe2, 0x42, 0xa2, 0x46, 0x7a, 0x94, 0x61, 0x04, 0x4a, 0x87, 0x5a, 0x31, 0x12, 0x63, 0x8c, 0x66, + 0xcc, 0xbc, 0x18, 0x31, 0x5e, 0x40, 0xb0, 0xf9, 0x9a, 0x2e, 0x5f, 0x03, 0x9d, 0xa4, 0x6b, 0x59, + 0x2a, 0x8c, 0xc0, 0x7f, 0x21, 0x9d, 0xb1, 0x04, 0x11, 0x4a, 0x78, 0xc8, 0xde, 0x31, 0x99, 0xc1, + 0x04, 0x66, 0x28, 0x23, 0x35, 0x81, 0xb7, 0x25, 0x28, 0x4d, 0xef, 0x48, 0x5d, 0x80, 0x4c, 0x30, + 0x6a, 0xd7, 0x7a, 0xb5, 0xfe, 0xf1, 0x80, 0x71, 0x93, 0xc8, 0x8b, 0x44, 0xfe, 0x58, 0x24, 0x0e, + 0x1b, 0x9f, 0x5f, 0xde, 0xbf, 0x8f, 0x6f, 0xaf, 0x36, 0xc9, 0x3d, 0xbe, 0x47, 0xba, 0x8e, 0x74, + 0x25, 0x30, 0x53, 0xe0, 0x77, 0x08, 0xbb, 0x17, 0x62, 0xb1, 0xb2, 0x96, 0xfb, 0x5d, 0x72, 0x61, + 0xdd, 0xda, 0xcd, 0x23, 0x5c, 0xae, 0xe7, 0x0e, 0x73, 0xb9, 0x75, 0x98, 0x25, 0x44, 0x89, 0x76, + 0x9a, 0x8b, 0x6d, 0x6e, 0x3e, 0x27, 0xad, 0x91, 0x84, 0x50, 0x17, 0xbf, 0x55, 0xfa, 0xda, 0xe4, + 0x6c, 0x77, 0x61, 0x2c, 0x83, 0x9f, 0xff, 0xa4, 0x31, 0xce, 0x99, 0xd1, 0x39, 0x69, 0x59, 0xef, + 0x42, 0x6f, 0x78, 0xc9, 0x91, 0xef, 0xe3, 0xc2, 0xfa, 0x87, 0x85, 
0xa6, 0x98, 0x46, 0xe4, 0xc4, + 0x72, 0x44, 0x7a, 0xb5, 0x15, 0xe0, 0x46, 0xc0, 0xae, 0x0f, 0xc9, 0xec, 0x2d, 0xf9, 0xb5, 0x9d, + 0x2d, 0x55, 0x56, 0xce, 0x96, 0x1d, 0x68, 0x7f, 0x5a, 0x0c, 0x16, 0x77, 0x4b, 0x05, 0xaa, 0xbb, + 0xa5, 0x4a, 0x97, 0x3e, 0x91, 0x66, 0x15, 0x22, 0xed, 0x6d, 0x39, 0xad, 0xe0, 0xd9, 0xe5, 0x1e, + 0x85, 0x89, 0x1d, 0x9e, 0x3e, 0x53, 0xa5, 0x51, 0xce, 0x79, 0x82, 0xc1, 0x0c, 0xd3, 0x14, 0xb3, + 0x40, 0x4c, 0xa7, 0xf5, 0xcd, 0x43, 0xba, 0xfd, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x32, 0x6d, 0x84, + 0xad, 0xd1, 0x03, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCPaymentsClient interface { + DRPCConn() drpc.Conn + + PrepareInvoiceRecords(ctx context.Context, in *PrepareInvoiceRecordsRequest) (*PrepareInvoiceRecordsResponse, error) + ApplyInvoiceRecords(ctx context.Context, in *ApplyInvoiceRecordsRequest) (*ApplyInvoiceRecordsResponse, error) + ApplyInvoiceCoupons(ctx context.Context, in *ApplyInvoiceCouponsRequest) (*ApplyInvoiceCouponsResponse, error) + ApplyInvoiceCredits(ctx context.Context, in *ApplyInvoiceCreditsRequest) (*ApplyInvoiceCreditsResponse, error) + CreateInvoices(ctx context.Context, in *CreateInvoicesRequest) (*CreateInvoicesResponse, error) +} + +type drpcPaymentsClient struct { + cc drpc.Conn +} + +func NewDRPCPaymentsClient(cc drpc.Conn) DRPCPaymentsClient { + return &drpcPaymentsClient{cc} +} + +func (c *drpcPaymentsClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcPaymentsClient) PrepareInvoiceRecords(ctx context.Context, in *PrepareInvoiceRecordsRequest) (*PrepareInvoiceRecordsResponse, error) { + out := new(PrepareInvoiceRecordsResponse) + err := c.cc.Invoke(ctx, "/nodestats.Payments/PrepareInvoiceRecords", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPaymentsClient) ApplyInvoiceRecords(ctx context.Context, in *ApplyInvoiceRecordsRequest) (*ApplyInvoiceRecordsResponse, error) { + out := new(ApplyInvoiceRecordsResponse) + err := c.cc.Invoke(ctx, 
"/nodestats.Payments/ApplyInvoiceRecords", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPaymentsClient) ApplyInvoiceCoupons(ctx context.Context, in *ApplyInvoiceCouponsRequest) (*ApplyInvoiceCouponsResponse, error) { + out := new(ApplyInvoiceCouponsResponse) + err := c.cc.Invoke(ctx, "/nodestats.Payments/ApplyInvoiceCoupons", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPaymentsClient) ApplyInvoiceCredits(ctx context.Context, in *ApplyInvoiceCreditsRequest) (*ApplyInvoiceCreditsResponse, error) { + out := new(ApplyInvoiceCreditsResponse) + err := c.cc.Invoke(ctx, "/nodestats.Payments/ApplyInvoiceCredits", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPaymentsClient) CreateInvoices(ctx context.Context, in *CreateInvoicesRequest) (*CreateInvoicesResponse, error) { + out := new(CreateInvoicesResponse) + err := c.cc.Invoke(ctx, "/nodestats.Payments/CreateInvoices", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCPaymentsServer interface { + PrepareInvoiceRecords(context.Context, *PrepareInvoiceRecordsRequest) (*PrepareInvoiceRecordsResponse, error) + ApplyInvoiceRecords(context.Context, *ApplyInvoiceRecordsRequest) (*ApplyInvoiceRecordsResponse, error) + ApplyInvoiceCoupons(context.Context, *ApplyInvoiceCouponsRequest) (*ApplyInvoiceCouponsResponse, error) + ApplyInvoiceCredits(context.Context, *ApplyInvoiceCreditsRequest) (*ApplyInvoiceCreditsResponse, error) + CreateInvoices(context.Context, *CreateInvoicesRequest) (*CreateInvoicesResponse, error) +} + +type DRPCPaymentsDescription struct{} + +func (DRPCPaymentsDescription) NumMethods() int { return 5 } + +func (DRPCPaymentsDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/nodestats.Payments/PrepareInvoiceRecords", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + 
return srv.(DRPCPaymentsServer). + PrepareInvoiceRecords( + ctx, + in1.(*PrepareInvoiceRecordsRequest), + ) + }, DRPCPaymentsServer.PrepareInvoiceRecords, true + case 1: + return "/nodestats.Payments/ApplyInvoiceRecords", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPaymentsServer). + ApplyInvoiceRecords( + ctx, + in1.(*ApplyInvoiceRecordsRequest), + ) + }, DRPCPaymentsServer.ApplyInvoiceRecords, true + case 2: + return "/nodestats.Payments/ApplyInvoiceCoupons", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPaymentsServer). + ApplyInvoiceCoupons( + ctx, + in1.(*ApplyInvoiceCouponsRequest), + ) + }, DRPCPaymentsServer.ApplyInvoiceCoupons, true + case 3: + return "/nodestats.Payments/ApplyInvoiceCredits", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPaymentsServer). + ApplyInvoiceCredits( + ctx, + in1.(*ApplyInvoiceCreditsRequest), + ) + }, DRPCPaymentsServer.ApplyInvoiceCredits, true + case 4: + return "/nodestats.Payments/CreateInvoices", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPaymentsServer). 
+ CreateInvoices( + ctx, + in1.(*CreateInvoicesRequest), + ) + }, DRPCPaymentsServer.CreateInvoices, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterPayments(mux drpc.Mux, impl DRPCPaymentsServer) error { + return mux.Register(impl, DRPCPaymentsDescription{}) +} + +type DRPCPayments_PrepareInvoiceRecordsStream interface { + drpc.Stream + SendAndClose(*PrepareInvoiceRecordsResponse) error +} + +type drpcPaymentsPrepareInvoiceRecordsStream struct { + drpc.Stream +} + +func (x *drpcPaymentsPrepareInvoiceRecordsStream) SendAndClose(m *PrepareInvoiceRecordsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPayments_ApplyInvoiceRecordsStream interface { + drpc.Stream + SendAndClose(*ApplyInvoiceRecordsResponse) error +} + +type drpcPaymentsApplyInvoiceRecordsStream struct { + drpc.Stream +} + +func (x *drpcPaymentsApplyInvoiceRecordsStream) SendAndClose(m *ApplyInvoiceRecordsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPayments_ApplyInvoiceCouponsStream interface { + drpc.Stream + SendAndClose(*ApplyInvoiceCouponsResponse) error +} + +type drpcPaymentsApplyInvoiceCouponsStream struct { + drpc.Stream +} + +func (x *drpcPaymentsApplyInvoiceCouponsStream) SendAndClose(m *ApplyInvoiceCouponsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPayments_ApplyInvoiceCreditsStream interface { + drpc.Stream + SendAndClose(*ApplyInvoiceCreditsResponse) error +} + +type drpcPaymentsApplyInvoiceCreditsStream struct { + drpc.Stream +} + +func (x *drpcPaymentsApplyInvoiceCreditsStream) SendAndClose(m *ApplyInvoiceCreditsResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPayments_CreateInvoicesStream interface { + drpc.Stream + SendAndClose(*CreateInvoicesResponse) error +} + +type drpcPaymentsCreateInvoicesStream 
struct { + drpc.Stream +} + +func (x *drpcPaymentsCreateInvoicesStream) SendAndClose(m *CreateInvoicesResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/payments.proto b/vendor/storj.io/common/pb/payments.proto new file mode 100644 index 000000000..c4942d568 --- /dev/null +++ b/vendor/storj.io/common/pb/payments.proto @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package nodestats; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +service Payments { + rpc PrepareInvoiceRecords(PrepareInvoiceRecordsRequest) returns (PrepareInvoiceRecordsResponse); + rpc ApplyInvoiceRecords(ApplyInvoiceRecordsRequest) returns (ApplyInvoiceRecordsResponse); + rpc ApplyInvoiceCoupons(ApplyInvoiceCouponsRequest) returns (ApplyInvoiceCouponsResponse); + rpc ApplyInvoiceCredits(ApplyInvoiceCreditsRequest) returns (ApplyInvoiceCreditsResponse); + rpc CreateInvoices(CreateInvoicesRequest) returns (CreateInvoicesResponse); +} + +message PrepareInvoiceRecordsRequest { + google.protobuf.Timestamp period = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; +} + +message PrepareInvoiceRecordsResponse {} + +message ApplyInvoiceRecordsRequest {} +message ApplyInvoiceRecordsResponse {} + +message ApplyInvoiceCouponsRequest {} +message ApplyInvoiceCouponsResponse {} + +message ApplyInvoiceCreditsRequest {} +message ApplyInvoiceCreditsResponse {} + +message CreateInvoicesRequest {} +message CreateInvoicesResponse {} diff --git a/vendor/storj.io/common/pb/piecestore2.pb.go b/vendor/storj.io/common/pb/piecestore2.pb.go new file mode 100644 index 000000000..20acb0696 --- /dev/null +++ b/vendor/storj.io/common/pb/piecestore2.pb.go @@ -0,0 +1,1165 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: piecestore2.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PieceHeader_FormatVersion int32 + +const ( + PieceHeader_FORMAT_V0 PieceHeader_FormatVersion = 0 + PieceHeader_FORMAT_V1 PieceHeader_FormatVersion = 1 +) + +var PieceHeader_FormatVersion_name = map[int32]string{ + 0: "FORMAT_V0", + 1: "FORMAT_V1", +} + +var PieceHeader_FormatVersion_value = map[string]int32{ + "FORMAT_V0": 0, + "FORMAT_V1": 1, +} + +func (x PieceHeader_FormatVersion) String() string { + return proto.EnumName(PieceHeader_FormatVersion_name, int32(x)) +} + +func (PieceHeader_FormatVersion) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{12, 0} +} + +// Expected order of messages from uplink: +// OrderLimit -> +// repeated +// Order -> +// Chunk -> +// PieceHash signed by uplink -> +// <- PieceHash signed by storage node +// +type PieceUploadRequest struct { + // first message to show that we are allowed to upload + Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + // order for uploading + Order *Order `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"` + Chunk *PieceUploadRequest_Chunk `protobuf:"bytes,3,opt,name=chunk,proto3" json:"chunk,omitempty"` + // final message + Done *PieceHash `protobuf:"bytes,4,opt,name=done,proto3" json:"done,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceUploadRequest) Reset() { *m = PieceUploadRequest{} } +func (m *PieceUploadRequest) String() string { return proto.CompactTextString(m) } +func (*PieceUploadRequest) ProtoMessage() {} +func (*PieceUploadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{0} +} +func (m *PieceUploadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceUploadRequest.Unmarshal(m, b) +} +func (m *PieceUploadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceUploadRequest.Marshal(b, m, deterministic) +} +func (m *PieceUploadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceUploadRequest.Merge(m, src) +} +func (m *PieceUploadRequest) XXX_Size() int { + return xxx_messageInfo_PieceUploadRequest.Size(m) +} +func (m *PieceUploadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PieceUploadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceUploadRequest proto.InternalMessageInfo + +func (m *PieceUploadRequest) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +func (m *PieceUploadRequest) GetOrder() *Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *PieceUploadRequest) GetChunk() *PieceUploadRequest_Chunk { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *PieceUploadRequest) GetDone() *PieceHash { + if m != nil { + return m.Done + } + return nil +} + +// data message +type PieceUploadRequest_Chunk struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceUploadRequest_Chunk) Reset() { *m = PieceUploadRequest_Chunk{} } +func (m *PieceUploadRequest_Chunk) String() string { return 
proto.CompactTextString(m) } +func (*PieceUploadRequest_Chunk) ProtoMessage() {} +func (*PieceUploadRequest_Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{0, 0} +} +func (m *PieceUploadRequest_Chunk) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceUploadRequest_Chunk.Unmarshal(m, b) +} +func (m *PieceUploadRequest_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceUploadRequest_Chunk.Marshal(b, m, deterministic) +} +func (m *PieceUploadRequest_Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceUploadRequest_Chunk.Merge(m, src) +} +func (m *PieceUploadRequest_Chunk) XXX_Size() int { + return xxx_messageInfo_PieceUploadRequest_Chunk.Size(m) +} +func (m *PieceUploadRequest_Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_PieceUploadRequest_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceUploadRequest_Chunk proto.InternalMessageInfo + +func (m *PieceUploadRequest_Chunk) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *PieceUploadRequest_Chunk) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +type PieceUploadResponse struct { + Done *PieceHash `protobuf:"bytes,1,opt,name=done,proto3" json:"done,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceUploadResponse) Reset() { *m = PieceUploadResponse{} } +func (m *PieceUploadResponse) String() string { return proto.CompactTextString(m) } +func (*PieceUploadResponse) ProtoMessage() {} +func (*PieceUploadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{1} +} +func (m *PieceUploadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceUploadResponse.Unmarshal(m, b) +} +func (m *PieceUploadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_PieceUploadResponse.Marshal(b, m, deterministic) +} +func (m *PieceUploadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceUploadResponse.Merge(m, src) +} +func (m *PieceUploadResponse) XXX_Size() int { + return xxx_messageInfo_PieceUploadResponse.Size(m) +} +func (m *PieceUploadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PieceUploadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceUploadResponse proto.InternalMessageInfo + +func (m *PieceUploadResponse) GetDone() *PieceHash { + if m != nil { + return m.Done + } + return nil +} + +// Expected order of messages from uplink: +// {OrderLimit, Chunk} -> +// go repeated +// Order -> (async) +// go repeated +// <- PieceDownloadResponse.Chunk +type PieceDownloadRequest struct { + // first message to show that we are allowed to upload + Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + // order for downloading + Order *Order `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"` + // request for the chunk + Chunk *PieceDownloadRequest_Chunk `protobuf:"bytes,3,opt,name=chunk,proto3" json:"chunk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDownloadRequest) Reset() { *m = PieceDownloadRequest{} } +func (m *PieceDownloadRequest) String() string { return proto.CompactTextString(m) } +func (*PieceDownloadRequest) ProtoMessage() {} +func (*PieceDownloadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{2} +} +func (m *PieceDownloadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDownloadRequest.Unmarshal(m, b) +} +func (m *PieceDownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDownloadRequest.Marshal(b, m, deterministic) +} +func (m *PieceDownloadRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PieceDownloadRequest.Merge(m, src) +} +func (m *PieceDownloadRequest) XXX_Size() int { + return xxx_messageInfo_PieceDownloadRequest.Size(m) +} +func (m *PieceDownloadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDownloadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDownloadRequest proto.InternalMessageInfo + +func (m *PieceDownloadRequest) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +func (m *PieceDownloadRequest) GetOrder() *Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *PieceDownloadRequest) GetChunk() *PieceDownloadRequest_Chunk { + if m != nil { + return m.Chunk + } + return nil +} + +// Chunk that we wish to download +type PieceDownloadRequest_Chunk struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + ChunkSize int64 `protobuf:"varint,2,opt,name=chunk_size,json=chunkSize,proto3" json:"chunk_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDownloadRequest_Chunk) Reset() { *m = PieceDownloadRequest_Chunk{} } +func (m *PieceDownloadRequest_Chunk) String() string { return proto.CompactTextString(m) } +func (*PieceDownloadRequest_Chunk) ProtoMessage() {} +func (*PieceDownloadRequest_Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{2, 0} +} +func (m *PieceDownloadRequest_Chunk) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDownloadRequest_Chunk.Unmarshal(m, b) +} +func (m *PieceDownloadRequest_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDownloadRequest_Chunk.Marshal(b, m, deterministic) +} +func (m *PieceDownloadRequest_Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceDownloadRequest_Chunk.Merge(m, src) +} +func (m *PieceDownloadRequest_Chunk) XXX_Size() int { + return 
xxx_messageInfo_PieceDownloadRequest_Chunk.Size(m) +} +func (m *PieceDownloadRequest_Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDownloadRequest_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDownloadRequest_Chunk proto.InternalMessageInfo + +func (m *PieceDownloadRequest_Chunk) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *PieceDownloadRequest_Chunk) GetChunkSize() int64 { + if m != nil { + return m.ChunkSize + } + return 0 +} + +type PieceDownloadResponse struct { + Chunk *PieceDownloadResponse_Chunk `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` + Hash *PieceHash `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + Limit *OrderLimit `protobuf:"bytes,3,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDownloadResponse) Reset() { *m = PieceDownloadResponse{} } +func (m *PieceDownloadResponse) String() string { return proto.CompactTextString(m) } +func (*PieceDownloadResponse) ProtoMessage() {} +func (*PieceDownloadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{3} +} +func (m *PieceDownloadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDownloadResponse.Unmarshal(m, b) +} +func (m *PieceDownloadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDownloadResponse.Marshal(b, m, deterministic) +} +func (m *PieceDownloadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceDownloadResponse.Merge(m, src) +} +func (m *PieceDownloadResponse) XXX_Size() int { + return xxx_messageInfo_PieceDownloadResponse.Size(m) +} +func (m *PieceDownloadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDownloadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDownloadResponse proto.InternalMessageInfo + +func (m *PieceDownloadResponse) 
GetChunk() *PieceDownloadResponse_Chunk { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *PieceDownloadResponse) GetHash() *PieceHash { + if m != nil { + return m.Hash + } + return nil +} + +func (m *PieceDownloadResponse) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +// Chunk response for download request +type PieceDownloadResponse_Chunk struct { + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDownloadResponse_Chunk) Reset() { *m = PieceDownloadResponse_Chunk{} } +func (m *PieceDownloadResponse_Chunk) String() string { return proto.CompactTextString(m) } +func (*PieceDownloadResponse_Chunk) ProtoMessage() {} +func (*PieceDownloadResponse_Chunk) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{3, 0} +} +func (m *PieceDownloadResponse_Chunk) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDownloadResponse_Chunk.Unmarshal(m, b) +} +func (m *PieceDownloadResponse_Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDownloadResponse_Chunk.Marshal(b, m, deterministic) +} +func (m *PieceDownloadResponse_Chunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceDownloadResponse_Chunk.Merge(m, src) +} +func (m *PieceDownloadResponse_Chunk) XXX_Size() int { + return xxx_messageInfo_PieceDownloadResponse_Chunk.Size(m) +} +func (m *PieceDownloadResponse_Chunk) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDownloadResponse_Chunk.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDownloadResponse_Chunk proto.InternalMessageInfo + +func (m *PieceDownloadResponse_Chunk) GetOffset() int64 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *PieceDownloadResponse_Chunk) GetData() []byte 
{ + if m != nil { + return m.Data + } + return nil +} + +type PieceDeleteRequest struct { + Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDeleteRequest) Reset() { *m = PieceDeleteRequest{} } +func (m *PieceDeleteRequest) String() string { return proto.CompactTextString(m) } +func (*PieceDeleteRequest) ProtoMessage() {} +func (*PieceDeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{4} +} +func (m *PieceDeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDeleteRequest.Unmarshal(m, b) +} +func (m *PieceDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDeleteRequest.Marshal(b, m, deterministic) +} +func (m *PieceDeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceDeleteRequest.Merge(m, src) +} +func (m *PieceDeleteRequest) XXX_Size() int { + return xxx_messageInfo_PieceDeleteRequest.Size(m) +} +func (m *PieceDeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDeleteRequest proto.InternalMessageInfo + +func (m *PieceDeleteRequest) GetLimit() *OrderLimit { + if m != nil { + return m.Limit + } + return nil +} + +type PieceDeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceDeleteResponse) Reset() { *m = PieceDeleteResponse{} } +func (m *PieceDeleteResponse) String() string { return proto.CompactTextString(m) } +func (*PieceDeleteResponse) ProtoMessage() {} +func (*PieceDeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{5} +} +func (m *PieceDeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceDeleteResponse.Unmarshal(m, b) +} 
+func (m *PieceDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceDeleteResponse.Marshal(b, m, deterministic) +} +func (m *PieceDeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceDeleteResponse.Merge(m, src) +} +func (m *PieceDeleteResponse) XXX_Size() int { + return xxx_messageInfo_PieceDeleteResponse.Size(m) +} +func (m *PieceDeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PieceDeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceDeleteResponse proto.InternalMessageInfo + +type DeletePiecesRequest struct { + PieceIds []PieceID `protobuf:"bytes,1,rep,name=piece_ids,json=pieceIds,proto3,customtype=PieceID" json:"piece_ids"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeletePiecesRequest) Reset() { *m = DeletePiecesRequest{} } +func (m *DeletePiecesRequest) String() string { return proto.CompactTextString(m) } +func (*DeletePiecesRequest) ProtoMessage() {} +func (*DeletePiecesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{6} +} +func (m *DeletePiecesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeletePiecesRequest.Unmarshal(m, b) +} +func (m *DeletePiecesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeletePiecesRequest.Marshal(b, m, deterministic) +} +func (m *DeletePiecesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeletePiecesRequest.Merge(m, src) +} +func (m *DeletePiecesRequest) XXX_Size() int { + return xxx_messageInfo_DeletePiecesRequest.Size(m) +} +func (m *DeletePiecesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeletePiecesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeletePiecesRequest proto.InternalMessageInfo + +type DeletePiecesResponse struct { + UnhandledCount int64 `protobuf:"varint,1,opt,name=unhandled_count,json=unhandledCount,proto3" 
json:"unhandled_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeletePiecesResponse) Reset() { *m = DeletePiecesResponse{} } +func (m *DeletePiecesResponse) String() string { return proto.CompactTextString(m) } +func (*DeletePiecesResponse) ProtoMessage() {} +func (*DeletePiecesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{7} +} +func (m *DeletePiecesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeletePiecesResponse.Unmarshal(m, b) +} +func (m *DeletePiecesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeletePiecesResponse.Marshal(b, m, deterministic) +} +func (m *DeletePiecesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeletePiecesResponse.Merge(m, src) +} +func (m *DeletePiecesResponse) XXX_Size() int { + return xxx_messageInfo_DeletePiecesResponse.Size(m) +} +func (m *DeletePiecesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeletePiecesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeletePiecesResponse proto.InternalMessageInfo + +func (m *DeletePiecesResponse) GetUnhandledCount() int64 { + if m != nil { + return m.UnhandledCount + } + return 0 +} + +type RetainRequest struct { + CreationDate time.Time `protobuf:"bytes,1,opt,name=creation_date,json=creationDate,proto3,stdtime" json:"creation_date"` + Filter []byte `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetainRequest) Reset() { *m = RetainRequest{} } +func (m *RetainRequest) String() string { return proto.CompactTextString(m) } +func (*RetainRequest) ProtoMessage() {} +func (*RetainRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{8} +} +func (m *RetainRequest) XXX_Unmarshal(b []byte) 
error { + return xxx_messageInfo_RetainRequest.Unmarshal(m, b) +} +func (m *RetainRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RetainRequest.Marshal(b, m, deterministic) +} +func (m *RetainRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetainRequest.Merge(m, src) +} +func (m *RetainRequest) XXX_Size() int { + return xxx_messageInfo_RetainRequest.Size(m) +} +func (m *RetainRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RetainRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RetainRequest proto.InternalMessageInfo + +func (m *RetainRequest) GetCreationDate() time.Time { + if m != nil { + return m.CreationDate + } + return time.Time{} +} + +func (m *RetainRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +type RetainResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RetainResponse) Reset() { *m = RetainResponse{} } +func (m *RetainResponse) String() string { return proto.CompactTextString(m) } +func (*RetainResponse) ProtoMessage() {} +func (*RetainResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{9} +} +func (m *RetainResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RetainResponse.Unmarshal(m, b) +} +func (m *RetainResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RetainResponse.Marshal(b, m, deterministic) +} +func (m *RetainResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RetainResponse.Merge(m, src) +} +func (m *RetainResponse) XXX_Size() int { + return xxx_messageInfo_RetainResponse.Size(m) +} +func (m *RetainResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RetainResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RetainResponse proto.InternalMessageInfo + +type RestoreTrashRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreTrashRequest) Reset() { *m = RestoreTrashRequest{} } +func (m *RestoreTrashRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreTrashRequest) ProtoMessage() {} +func (*RestoreTrashRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{10} +} +func (m *RestoreTrashRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreTrashRequest.Unmarshal(m, b) +} +func (m *RestoreTrashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreTrashRequest.Marshal(b, m, deterministic) +} +func (m *RestoreTrashRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreTrashRequest.Merge(m, src) +} +func (m *RestoreTrashRequest) XXX_Size() int { + return xxx_messageInfo_RestoreTrashRequest.Size(m) +} +func (m *RestoreTrashRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreTrashRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreTrashRequest proto.InternalMessageInfo + +type RestoreTrashResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreTrashResponse) Reset() { *m = RestoreTrashResponse{} } +func (m *RestoreTrashResponse) String() string { return proto.CompactTextString(m) } +func (*RestoreTrashResponse) ProtoMessage() {} +func (*RestoreTrashResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{11} +} +func (m *RestoreTrashResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreTrashResponse.Unmarshal(m, b) +} +func (m *RestoreTrashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreTrashResponse.Marshal(b, m, deterministic) +} +func (m *RestoreTrashResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreTrashResponse.Merge(m, src) +} +func (m *RestoreTrashResponse) 
XXX_Size() int { + return xxx_messageInfo_RestoreTrashResponse.Size(m) +} +func (m *RestoreTrashResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreTrashResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreTrashResponse proto.InternalMessageInfo + +// PieceHeader is used in piece storage to keep track of piece attributes. +type PieceHeader struct { + // the storage format version being used for this piece. The piece filename should agree with this. + // The inclusion of this field is intended to aid repairability when filenames are damaged. + FormatVersion PieceHeader_FormatVersion `protobuf:"varint,1,opt,name=format_version,json=formatVersion,proto3,enum=piecestore.PieceHeader_FormatVersion" json:"format_version,omitempty"` + // content hash of the piece + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + // timestamp when upload occurred, as given by the "timestamp" field in the original orders.PieceHash + CreationTime time.Time `protobuf:"bytes,3,opt,name=creation_time,json=creationTime,proto3,stdtime" json:"creation_time"` + // signature from uplink over the original orders.PieceHash (the corresponding PieceHashSigning + // is reconstructable using the piece id from the piecestore, the piece size from the + // filesystem (minus the piece header size), and these (hash, upload_time, signature) fields). 
+ Signature []byte `protobuf:"bytes,4,opt,name=signature,proto3" json:"signature,omitempty"` + // the OrderLimit authorizing storage of this piece, as signed by the satellite and sent by + // the uplink + OrderLimit OrderLimit `protobuf:"bytes,5,opt,name=order_limit,json=orderLimit,proto3" json:"order_limit"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PieceHeader) Reset() { *m = PieceHeader{} } +func (m *PieceHeader) String() string { return proto.CompactTextString(m) } +func (*PieceHeader) ProtoMessage() {} +func (*PieceHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_23ff32dd550c2439, []int{12} +} +func (m *PieceHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PieceHeader.Unmarshal(m, b) +} +func (m *PieceHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PieceHeader.Marshal(b, m, deterministic) +} +func (m *PieceHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PieceHeader.Merge(m, src) +} +func (m *PieceHeader) XXX_Size() int { + return xxx_messageInfo_PieceHeader.Size(m) +} +func (m *PieceHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PieceHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_PieceHeader proto.InternalMessageInfo + +func (m *PieceHeader) GetFormatVersion() PieceHeader_FormatVersion { + if m != nil { + return m.FormatVersion + } + return PieceHeader_FORMAT_V0 +} + +func (m *PieceHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *PieceHeader) GetCreationTime() time.Time { + if m != nil { + return m.CreationTime + } + return time.Time{} +} + +func (m *PieceHeader) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func (m *PieceHeader) GetOrderLimit() OrderLimit { + if m != nil { + return m.OrderLimit + } + return OrderLimit{} +} + +func init() { + proto.RegisterEnum("piecestore.PieceHeader_FormatVersion", 
PieceHeader_FormatVersion_name, PieceHeader_FormatVersion_value) + proto.RegisterType((*PieceUploadRequest)(nil), "piecestore.PieceUploadRequest") + proto.RegisterType((*PieceUploadRequest_Chunk)(nil), "piecestore.PieceUploadRequest.Chunk") + proto.RegisterType((*PieceUploadResponse)(nil), "piecestore.PieceUploadResponse") + proto.RegisterType((*PieceDownloadRequest)(nil), "piecestore.PieceDownloadRequest") + proto.RegisterType((*PieceDownloadRequest_Chunk)(nil), "piecestore.PieceDownloadRequest.Chunk") + proto.RegisterType((*PieceDownloadResponse)(nil), "piecestore.PieceDownloadResponse") + proto.RegisterType((*PieceDownloadResponse_Chunk)(nil), "piecestore.PieceDownloadResponse.Chunk") + proto.RegisterType((*PieceDeleteRequest)(nil), "piecestore.PieceDeleteRequest") + proto.RegisterType((*PieceDeleteResponse)(nil), "piecestore.PieceDeleteResponse") + proto.RegisterType((*DeletePiecesRequest)(nil), "piecestore.DeletePiecesRequest") + proto.RegisterType((*DeletePiecesResponse)(nil), "piecestore.DeletePiecesResponse") + proto.RegisterType((*RetainRequest)(nil), "piecestore.RetainRequest") + proto.RegisterType((*RetainResponse)(nil), "piecestore.RetainResponse") + proto.RegisterType((*RestoreTrashRequest)(nil), "piecestore.RestoreTrashRequest") + proto.RegisterType((*RestoreTrashResponse)(nil), "piecestore.RestoreTrashResponse") + proto.RegisterType((*PieceHeader)(nil), "piecestore.PieceHeader") +} + +func init() { proto.RegisterFile("piecestore2.proto", fileDescriptor_23ff32dd550c2439) } + +var fileDescriptor_23ff32dd550c2439 = []byte{ + // 786 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0x4d, 0x4f, 0x13, 0x5d, + 0x14, 0xee, 0xd0, 0x8f, 0x97, 0x1e, 0xa6, 0x05, 0x2e, 0x85, 0xf4, 0x9d, 0xbc, 0xaf, 0xad, 0xa3, + 0x48, 0x17, 0x3a, 0xc5, 0xb2, 0xd2, 0x20, 0xc4, 0xd2, 0x10, 0x49, 0x40, 0xf0, 0xf2, 0xb1, 0x70, + 0xd3, 0x0c, 0x9d, 0xdb, 0x76, 0xb4, 0x9d, 0x5b, 0x67, 0x6e, 0x35, 0xe1, 0x17, 0xb8, 0xf4, 
0x37, + 0xb9, 0xf2, 0x37, 0x18, 0x83, 0x0b, 0x97, 0xfe, 0x04, 0x37, 0xe6, 0x7e, 0x4c, 0xdb, 0xa1, 0x85, + 0x06, 0x13, 0x57, 0xed, 0x3d, 0xe7, 0x39, 0xf7, 0x3c, 0xf7, 0x99, 0xe7, 0x1c, 0x58, 0xec, 0xb9, + 0xa4, 0x41, 0x02, 0x46, 0x7d, 0x52, 0xb1, 0x7a, 0x3e, 0x65, 0x14, 0xc1, 0x30, 0x64, 0x40, 0x8b, + 0xb6, 0xa8, 0x8c, 0x1b, 0x85, 0x16, 0xa5, 0xad, 0x0e, 0x29, 0x8b, 0xd3, 0x79, 0xbf, 0x59, 0x66, + 0x6e, 0x97, 0x04, 0xcc, 0xee, 0xf6, 0x14, 0x40, 0xa7, 0xbe, 0x43, 0xfc, 0x40, 0x9e, 0xcc, 0x5f, + 0x1a, 0xa0, 0x23, 0x7e, 0xd3, 0x69, 0xaf, 0x43, 0x6d, 0x07, 0x93, 0x77, 0x7d, 0x12, 0x30, 0x54, + 0x82, 0x64, 0xc7, 0xed, 0xba, 0x2c, 0xaf, 0x15, 0xb5, 0xd2, 0x5c, 0x05, 0x59, 0xaa, 0xe8, 0x90, + 0xff, 0xec, 0xf3, 0x0c, 0x96, 0x00, 0x74, 0x0f, 0x92, 0x22, 0x97, 0x9f, 0x11, 0xc8, 0x4c, 0x04, + 0x89, 0x65, 0x0e, 0x3d, 0x85, 0x64, 0xa3, 0xdd, 0xf7, 0xde, 0xe6, 0xe3, 0x02, 0x74, 0xdf, 0x1a, + 0x92, 0xb7, 0xc6, 0xbb, 0x5b, 0x3b, 0x1c, 0x8b, 0x65, 0x09, 0x5a, 0x85, 0x84, 0x43, 0x3d, 0x92, + 0x4f, 0x88, 0xd2, 0xc5, 0xf0, 0x7e, 0x51, 0xf6, 0xc2, 0x0e, 0xda, 0x58, 0xa4, 0x8d, 0x0d, 0x48, + 0x8a, 0x32, 0xb4, 0x02, 0x29, 0xda, 0x6c, 0x06, 0x44, 0x72, 0x8f, 0x63, 0x75, 0x42, 0x08, 0x12, + 0x8e, 0xcd, 0x6c, 0xc1, 0x53, 0xc7, 0xe2, 0xbf, 0xb9, 0x09, 0x4b, 0x91, 0xf6, 0x41, 0x8f, 0x7a, + 0x01, 0x19, 0xb4, 0xd4, 0x6e, 0x6c, 0x69, 0xfe, 0xd0, 0x20, 0x27, 0x62, 0x35, 0xfa, 0xc1, 0xfb, + 0x8b, 0xea, 0x6d, 0x46, 0xd5, 0x7b, 0x30, 0xa6, 0xde, 0x95, 0xfe, 0x11, 0xfd, 0x8c, 0xad, 0x69, + 0xc2, 0xfc, 0x0f, 0x20, 0x90, 0xf5, 0xc0, 0xbd, 0x20, 0x82, 0x48, 0x1c, 0xa7, 0x45, 0xe4, 0xd8, + 0xbd, 0x20, 0xe6, 0x37, 0x0d, 0x96, 0xaf, 0x74, 0x51, 0x32, 0x3d, 0x0b, 0x79, 0xc9, 0x67, 0xae, + 0xdd, 0xc0, 0x4b, 0x56, 0x8c, 0x7d, 0xd8, 0xb6, 0x1d, 0xb4, 0xd5, 0xd3, 0x27, 0xa9, 0xcc, 0xd3, + 0x43, 0x31, 0xe3, 0x53, 0xc4, 0xfc, 0x33, 0x0b, 0x6c, 0x29, 0xff, 0xd7, 0x48, 0x87, 0x30, 0x72, + 0xeb, 0x2f, 0x68, 0x2e, 0x2b, 0x0b, 0x85, 0xf5, 0xf2, 0xa5, 0xe6, 0x0e, 0x2c, 0xc9, 0x88, 0x48, + 0x06, 0xe1, 0xbd, 0x0f, 0x21, 
0x2d, 0x44, 0xaa, 0xbb, 0x4e, 0x90, 0xd7, 0x8a, 0xf1, 0x92, 0x5e, + 0x9d, 0xff, 0x72, 0x59, 0x88, 0x7d, 0xbd, 0x2c, 0xfc, 0x23, 0x90, 0x7b, 0x35, 0x3c, 0x2b, 0x10, + 0x7b, 0x4e, 0x60, 0x6e, 0x43, 0x2e, 0x7a, 0x89, 0x12, 0x7e, 0x0d, 0xe6, 0xfb, 0x5e, 0xdb, 0xf6, + 0x9c, 0x0e, 0x71, 0xea, 0x0d, 0xda, 0xf7, 0xc2, 0x87, 0x66, 0x07, 0xe1, 0x1d, 0x1e, 0x35, 0x7d, + 0xc8, 0x60, 0xc2, 0x6c, 0xd7, 0x0b, 0xfb, 0xef, 0x41, 0xa6, 0xe1, 0x13, 0x9b, 0xb9, 0xd4, 0xab, + 0x3b, 0x36, 0x0b, 0x2d, 0x6e, 0x58, 0x72, 0x6b, 0x58, 0xe1, 0xd6, 0xb0, 0x4e, 0xc2, 0xad, 0x51, + 0x9d, 0xe5, 0xfc, 0x3e, 0x7d, 0x2f, 0x68, 0x58, 0x0f, 0x4b, 0x6b, 0x36, 0x23, 0x5c, 0xe4, 0xa6, + 0xdb, 0x61, 0xca, 0xbb, 0x3a, 0x56, 0x27, 0x73, 0x01, 0xb2, 0x61, 0x4f, 0xa5, 0xc5, 0x32, 0x2c, + 0x61, 0x69, 0x8b, 0x13, 0x9f, 0x7f, 0x57, 0xc9, 0xc5, 0x5c, 0x81, 0x5c, 0x34, 0xac, 0xe0, 0x9f, + 0x67, 0x60, 0x4e, 0x9a, 0x80, 0xd8, 0xdc, 0xfe, 0xfb, 0x90, 0x6d, 0x52, 0xbf, 0x6b, 0xb3, 0xfa, + 0x7b, 0xe2, 0x07, 0x2e, 0xf5, 0x04, 0xe9, 0x6c, 0x65, 0x75, 0xcc, 0x6f, 0xb2, 0xc0, 0xda, 0x15, + 0xe8, 0x33, 0x09, 0xc6, 0x99, 0xe6, 0xe8, 0x91, 0x7b, 0x60, 0xe0, 0x3a, 0x5d, 0x59, 0x6c, 0x54, + 0x15, 0xbe, 0x2e, 0x95, 0xd5, 0x6e, 0xa9, 0x0a, 0x4f, 0xa2, 0xff, 0x20, 0x1d, 0xb8, 0x2d, 0xcf, + 0x66, 0x7d, 0x5f, 0xae, 0x2c, 0x1d, 0x0f, 0x03, 0xe8, 0x09, 0xcc, 0x09, 0x23, 0xd5, 0xa5, 0xb9, + 0x92, 0xd7, 0x99, 0xab, 0x9a, 0xe0, 0xd7, 0x63, 0xa0, 0x83, 0x88, 0xf9, 0x08, 0x32, 0x91, 0x77, + 0xa1, 0x0c, 0xa4, 0x77, 0x0f, 0xf1, 0xc1, 0xf3, 0x93, 0xfa, 0xd9, 0xfa, 0x42, 0x6c, 0xf4, 0xf8, + 0x78, 0x41, 0xab, 0xfc, 0x8c, 0x03, 0x1c, 0x0d, 0xe4, 0x41, 0x07, 0x90, 0x92, 0x3b, 0x0e, 0xdd, + 0xb9, 0x79, 0xf7, 0x1a, 0x85, 0x6b, 0xf3, 0xea, 0xf3, 0xc4, 0x4a, 0x1a, 0x3a, 0x85, 0xd9, 0x70, + 0xb6, 0x51, 0x71, 0xda, 0x3a, 0x32, 0xee, 0x4e, 0x5d, 0x0c, 0xfc, 0xd2, 0x75, 0x0d, 0xbd, 0x84, + 0x94, 0xf4, 0xfb, 0x04, 0x96, 0x91, 0xf9, 0x9c, 0xc0, 0xf2, 0xca, 0xfc, 0xc5, 0x3f, 0xce, 0x68, + 0xe8, 0x15, 0xe8, 0xa3, 0xf3, 0x83, 0x22, 0x55, 0x13, 0xc6, 0xd3, 
0x28, 0x5e, 0x0f, 0x50, 0xa3, + 0xb7, 0x0d, 0x29, 0xe9, 0x6e, 0xf4, 0xef, 0x28, 0x36, 0x32, 0x65, 0x86, 0x31, 0x29, 0xa5, 0x2e, + 0x38, 0x06, 0x7d, 0xd4, 0xf5, 0x51, 0x4e, 0x13, 0xc6, 0x24, 0xca, 0x69, 0xe2, 0xc0, 0xc4, 0xaa, + 0xb9, 0xd7, 0x88, 0xc7, 0xdf, 0x58, 0x2e, 0x2d, 0x37, 0x68, 0xb7, 0x4b, 0xbd, 0x72, 0xef, 0xfc, + 0x3c, 0x25, 0x7c, 0xbb, 0xf1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x46, 0xf3, 0x6f, 0x64, 0x3e, 0x08, + 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCPiecestoreClient interface { + DRPCConn() drpc.Conn + + Upload(ctx context.Context) (DRPCPiecestore_UploadClient, error) + Download(ctx context.Context) (DRPCPiecestore_DownloadClient, error) + Delete(ctx context.Context, in *PieceDeleteRequest) (*PieceDeleteResponse, error) + // DeletePieces deletes a set of pieces on satellite request + DeletePieces(ctx context.Context, in *DeletePiecesRequest) (*DeletePiecesResponse, error) + Retain(ctx context.Context, in *RetainRequest) (*RetainResponse, error) + RestoreTrash(ctx context.Context, in *RestoreTrashRequest) (*RestoreTrashResponse, error) +} + +type drpcPiecestoreClient struct { + cc drpc.Conn +} + +func NewDRPCPiecestoreClient(cc drpc.Conn) DRPCPiecestoreClient { + return &drpcPiecestoreClient{cc} +} + +func (c *drpcPiecestoreClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcPiecestoreClient) Upload(ctx context.Context) (DRPCPiecestore_UploadClient, error) { + stream, err := c.cc.NewStream(ctx, "/piecestore.Piecestore/Upload") + if err != nil { + return nil, err + } + x := &drpcPiecestoreUploadClient{stream} + return x, nil +} + +type DRPCPiecestore_UploadClient interface { + drpc.Stream + Send(*PieceUploadRequest) error + CloseAndRecv() (*PieceUploadResponse, error) +} + +type drpcPiecestoreUploadClient struct { + drpc.Stream +} + +func (x *drpcPiecestoreUploadClient) Send(m *PieceUploadRequest) error { + return x.MsgSend(m) +} + +func (x *drpcPiecestoreUploadClient) CloseAndRecv() (*PieceUploadResponse, error) { + if err := 
x.CloseSend(); err != nil { + return nil, err + } + m := new(PieceUploadResponse) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *drpcPiecestoreClient) Download(ctx context.Context) (DRPCPiecestore_DownloadClient, error) { + stream, err := c.cc.NewStream(ctx, "/piecestore.Piecestore/Download") + if err != nil { + return nil, err + } + x := &drpcPiecestoreDownloadClient{stream} + return x, nil +} + +type DRPCPiecestore_DownloadClient interface { + drpc.Stream + Send(*PieceDownloadRequest) error + Recv() (*PieceDownloadResponse, error) +} + +type drpcPiecestoreDownloadClient struct { + drpc.Stream +} + +func (x *drpcPiecestoreDownloadClient) Send(m *PieceDownloadRequest) error { + return x.MsgSend(m) +} + +func (x *drpcPiecestoreDownloadClient) Recv() (*PieceDownloadResponse, error) { + m := new(PieceDownloadResponse) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *drpcPiecestoreClient) Delete(ctx context.Context, in *PieceDeleteRequest) (*PieceDeleteResponse, error) { + out := new(PieceDeleteResponse) + err := c.cc.Invoke(ctx, "/piecestore.Piecestore/Delete", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPiecestoreClient) DeletePieces(ctx context.Context, in *DeletePiecesRequest) (*DeletePiecesResponse, error) { + out := new(DeletePiecesResponse) + err := c.cc.Invoke(ctx, "/piecestore.Piecestore/DeletePieces", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPiecestoreClient) Retain(ctx context.Context, in *RetainRequest) (*RetainResponse, error) { + out := new(RetainResponse) + err := c.cc.Invoke(ctx, "/piecestore.Piecestore/Retain", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcPiecestoreClient) RestoreTrash(ctx context.Context, in *RestoreTrashRequest) (*RestoreTrashResponse, error) { + out := new(RestoreTrashResponse) + err := c.cc.Invoke(ctx, 
"/piecestore.Piecestore/RestoreTrash", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCPiecestoreServer interface { + Upload(DRPCPiecestore_UploadStream) error + Download(DRPCPiecestore_DownloadStream) error + Delete(context.Context, *PieceDeleteRequest) (*PieceDeleteResponse, error) + // DeletePieces deletes a set of pieces on satellite request + DeletePieces(context.Context, *DeletePiecesRequest) (*DeletePiecesResponse, error) + Retain(context.Context, *RetainRequest) (*RetainResponse, error) + RestoreTrash(context.Context, *RestoreTrashRequest) (*RestoreTrashResponse, error) +} + +type DRPCPiecestoreDescription struct{} + +func (DRPCPiecestoreDescription) NumMethods() int { return 6 } + +func (DRPCPiecestoreDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/piecestore.Piecestore/Upload", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return nil, srv.(DRPCPiecestoreServer). + Upload( + &drpcPiecestoreUploadStream{in1.(drpc.Stream)}, + ) + }, DRPCPiecestoreServer.Upload, true + case 1: + return "/piecestore.Piecestore/Download", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return nil, srv.(DRPCPiecestoreServer). + Download( + &drpcPiecestoreDownloadStream{in1.(drpc.Stream)}, + ) + }, DRPCPiecestoreServer.Download, true + case 2: + return "/piecestore.Piecestore/Delete", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPiecestoreServer). + Delete( + ctx, + in1.(*PieceDeleteRequest), + ) + }, DRPCPiecestoreServer.Delete, true + case 3: + return "/piecestore.Piecestore/DeletePieces", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPiecestoreServer). 
+ DeletePieces( + ctx, + in1.(*DeletePiecesRequest), + ) + }, DRPCPiecestoreServer.DeletePieces, true + case 4: + return "/piecestore.Piecestore/Retain", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPiecestoreServer). + Retain( + ctx, + in1.(*RetainRequest), + ) + }, DRPCPiecestoreServer.Retain, true + case 5: + return "/piecestore.Piecestore/RestoreTrash", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCPiecestoreServer). + RestoreTrash( + ctx, + in1.(*RestoreTrashRequest), + ) + }, DRPCPiecestoreServer.RestoreTrash, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterPiecestore(mux drpc.Mux, impl DRPCPiecestoreServer) error { + return mux.Register(impl, DRPCPiecestoreDescription{}) +} + +type DRPCPiecestore_UploadStream interface { + drpc.Stream + SendAndClose(*PieceUploadResponse) error + Recv() (*PieceUploadRequest, error) +} + +type drpcPiecestoreUploadStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreUploadStream) SendAndClose(m *PieceUploadResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +func (x *drpcPiecestoreUploadStream) Recv() (*PieceUploadRequest, error) { + m := new(PieceUploadRequest) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +type DRPCPiecestore_DownloadStream interface { + drpc.Stream + Send(*PieceDownloadResponse) error + Recv() (*PieceDownloadRequest, error) +} + +type drpcPiecestoreDownloadStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreDownloadStream) Send(m *PieceDownloadResponse) error { + return x.MsgSend(m) +} + +func (x *drpcPiecestoreDownloadStream) Recv() (*PieceDownloadRequest, error) { + m := new(PieceDownloadRequest) + if err := x.MsgRecv(m); err != nil { + return nil, err + } + return m, nil +} + +type DRPCPiecestore_DeleteStream interface { + drpc.Stream + 
SendAndClose(*PieceDeleteResponse) error +} + +type drpcPiecestoreDeleteStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreDeleteStream) SendAndClose(m *PieceDeleteResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPiecestore_DeletePiecesStream interface { + drpc.Stream + SendAndClose(*DeletePiecesResponse) error +} + +type drpcPiecestoreDeletePiecesStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreDeletePiecesStream) SendAndClose(m *DeletePiecesResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPiecestore_RetainStream interface { + drpc.Stream + SendAndClose(*RetainResponse) error +} + +type drpcPiecestoreRetainStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreRetainStream) SendAndClose(m *RetainResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCPiecestore_RestoreTrashStream interface { + drpc.Stream + SendAndClose(*RestoreTrashResponse) error +} + +type drpcPiecestoreRestoreTrashStream struct { + drpc.Stream +} + +func (x *drpcPiecestoreRestoreTrashStream) SendAndClose(m *RestoreTrashResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/piecestore2.proto b/vendor/storj.io/common/pb/piecestore2.proto new file mode 100644 index 000000000..7ea2352af --- /dev/null +++ b/vendor/storj.io/common/pb/piecestore2.proto @@ -0,0 +1,132 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package piecestore; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "orders.proto"; + +service Piecestore { + rpc Upload(stream PieceUploadRequest) returns (PieceUploadResponse) {} + rpc Download(stream PieceDownloadRequest) returns (stream PieceDownloadResponse) {} + rpc Delete(PieceDeleteRequest) returns (PieceDeleteResponse) { + option deprecated = true; + } + // DeletePieces deletes a set of pieces on satellite request + rpc DeletePieces(DeletePiecesRequest) returns (DeletePiecesResponse); + rpc Retain(RetainRequest) returns (RetainResponse); + rpc RestoreTrash(RestoreTrashRequest) returns (RestoreTrashResponse) {} +} + +// Expected order of messages from uplink: +// OrderLimit -> +// repeated +// Order -> +// Chunk -> +// PieceHash signed by uplink -> +// <- PieceHash signed by storage node +// +message PieceUploadRequest { + // first message to show that we are allowed to upload + orders.OrderLimit limit = 1; + // order for uploading + orders.Order order = 2; + + // data message + message Chunk { + int64 offset = 1; + bytes data = 2; + } + Chunk chunk = 3; + // final message + orders.PieceHash done = 4; +} + +message PieceUploadResponse { + orders.PieceHash done = 1; +} + +// Expected order of messages from uplink: +// {OrderLimit, Chunk} -> +// go repeated +// Order -> (async) +// go repeated +// <- PieceDownloadResponse.Chunk +message PieceDownloadRequest { + // first message to show that we are allowed to upload + orders.OrderLimit limit = 1; + // order for downloading + orders.Order order = 2; + + // Chunk that we wish to download + message Chunk { + int64 offset = 1; + int64 chunk_size = 2; + } + + // request for the chunk + Chunk chunk = 3; +} + +message PieceDownloadResponse { + // Chunk response for download request + message Chunk { + int64 offset = 1; + bytes data = 2; + } + Chunk chunk = 1; + orders.PieceHash hash = 2; + orders.OrderLimit limit = 3; +} + 
+message PieceDeleteRequest { + orders.OrderLimit limit = 1; +} + +message PieceDeleteResponse { +} + +message DeletePiecesRequest { + repeated bytes piece_ids = 1 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; +} + +message DeletePiecesResponse { + int64 unhandled_count = 1; +} + +message RetainRequest { + google.protobuf.Timestamp creation_date = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + bytes filter = 2; +} + +message RetainResponse { +} + +message RestoreTrashRequest {} +message RestoreTrashResponse {} + +// PieceHeader is used in piece storage to keep track of piece attributes. +message PieceHeader { + enum FormatVersion { + FORMAT_V0 = 0; + FORMAT_V1 = 1; + } + // the storage format version being used for this piece. The piece filename should agree with this. + // The inclusion of this field is intended to aid repairability when filenames are damaged. + FormatVersion format_version = 1; + // content hash of the piece + bytes hash = 2; + // timestamp when upload occurred, as given by the "timestamp" field in the original orders.PieceHash + google.protobuf.Timestamp creation_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + // signature from uplink over the original orders.PieceHash (the corresponding PieceHashSigning + // is reconstructable using the piece id from the piecestore, the piece size from the + // filesystem (minus the piece header size), and these (hash, upload_time, signature) fields). + bytes signature = 4; + // the OrderLimit authorizing storage of this piece, as signed by the satellite and sent by + // the uplink + orders.OrderLimit order_limit = 5 [(gogoproto.nullable) = false]; +} diff --git a/vendor/storj.io/common/pb/pointerdb.pb.go b/vendor/storj.io/common/pb/pointerdb.pb.go new file mode 100644 index 000000000..230187e96 --- /dev/null +++ b/vendor/storj.io/common/pb/pointerdb.pb.go @@ -0,0 +1,534 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: pointerdb.proto + +package pb + +import ( + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type RedundancyScheme_SchemeType int32 + +const ( + RedundancyScheme_INVALID RedundancyScheme_SchemeType = 0 + RedundancyScheme_RS RedundancyScheme_SchemeType = 1 +) + +var RedundancyScheme_SchemeType_name = map[int32]string{ + 0: "INVALID", + 1: "RS", +} + +var RedundancyScheme_SchemeType_value = map[string]int32{ + "INVALID": 0, + "RS": 1, +} + +func (x RedundancyScheme_SchemeType) String() string { + return proto.EnumName(RedundancyScheme_SchemeType_name, int32(x)) +} + +func (RedundancyScheme_SchemeType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{0, 0} +} + +type Pointer_DataType int32 + +const ( + Pointer_INLINE Pointer_DataType = 0 + Pointer_REMOTE Pointer_DataType = 1 +) + +var Pointer_DataType_name = map[int32]string{ + 0: "INLINE", + 1: "REMOTE", +} + +var Pointer_DataType_value = map[string]int32{ + "INLINE": 0, + "REMOTE": 1, +} + +func (x Pointer_DataType) String() string { + return proto.EnumName(Pointer_DataType_name, int32(x)) +} + +func (Pointer_DataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{3, 0} +} + +type RedundancyScheme struct { + Type RedundancyScheme_SchemeType `protobuf:"varint,1,opt,name=type,proto3,enum=pointerdb.RedundancyScheme_SchemeType" json:"type,omitempty"` + // these values apply to RS encoding + MinReq int32 
`protobuf:"varint,2,opt,name=min_req,json=minReq,proto3" json:"min_req,omitempty"` + Total int32 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` + RepairThreshold int32 `protobuf:"varint,4,opt,name=repair_threshold,json=repairThreshold,proto3" json:"repair_threshold,omitempty"` + SuccessThreshold int32 `protobuf:"varint,5,opt,name=success_threshold,json=successThreshold,proto3" json:"success_threshold,omitempty"` + ErasureShareSize int32 `protobuf:"varint,6,opt,name=erasure_share_size,json=erasureShareSize,proto3" json:"erasure_share_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RedundancyScheme) Reset() { *m = RedundancyScheme{} } +func (m *RedundancyScheme) String() string { return proto.CompactTextString(m) } +func (*RedundancyScheme) ProtoMessage() {} +func (*RedundancyScheme) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{0} +} +func (m *RedundancyScheme) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RedundancyScheme.Unmarshal(m, b) +} +func (m *RedundancyScheme) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RedundancyScheme.Marshal(b, m, deterministic) +} +func (m *RedundancyScheme) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedundancyScheme.Merge(m, src) +} +func (m *RedundancyScheme) XXX_Size() int { + return xxx_messageInfo_RedundancyScheme.Size(m) +} +func (m *RedundancyScheme) XXX_DiscardUnknown() { + xxx_messageInfo_RedundancyScheme.DiscardUnknown(m) +} + +var xxx_messageInfo_RedundancyScheme proto.InternalMessageInfo + +func (m *RedundancyScheme) GetType() RedundancyScheme_SchemeType { + if m != nil { + return m.Type + } + return RedundancyScheme_INVALID +} + +func (m *RedundancyScheme) GetMinReq() int32 { + if m != nil { + return m.MinReq + } + return 0 +} + +func (m *RedundancyScheme) GetTotal() int32 { + if m != nil { + return m.Total + } + return 
0 +} + +func (m *RedundancyScheme) GetRepairThreshold() int32 { + if m != nil { + return m.RepairThreshold + } + return 0 +} + +func (m *RedundancyScheme) GetSuccessThreshold() int32 { + if m != nil { + return m.SuccessThreshold + } + return 0 +} + +func (m *RedundancyScheme) GetErasureShareSize() int32 { + if m != nil { + return m.ErasureShareSize + } + return 0 +} + +type RemotePiece struct { + PieceNum int32 `protobuf:"varint,1,opt,name=piece_num,json=pieceNum,proto3" json:"piece_num,omitempty"` + NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"` + Hash *PieceHash `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemotePiece) Reset() { *m = RemotePiece{} } +func (m *RemotePiece) String() string { return proto.CompactTextString(m) } +func (*RemotePiece) ProtoMessage() {} +func (*RemotePiece) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{1} +} +func (m *RemotePiece) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemotePiece.Unmarshal(m, b) +} +func (m *RemotePiece) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemotePiece.Marshal(b, m, deterministic) +} +func (m *RemotePiece) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemotePiece.Merge(m, src) +} +func (m *RemotePiece) XXX_Size() int { + return xxx_messageInfo_RemotePiece.Size(m) +} +func (m *RemotePiece) XXX_DiscardUnknown() { + xxx_messageInfo_RemotePiece.DiscardUnknown(m) +} + +var xxx_messageInfo_RemotePiece proto.InternalMessageInfo + +func (m *RemotePiece) GetPieceNum() int32 { + if m != nil { + return m.PieceNum + } + return 0 +} + +func (m *RemotePiece) GetHash() *PieceHash { + if m != nil { + return m.Hash + } + return nil +} + +type RemoteSegment struct { + Redundancy *RedundancyScheme 
`protobuf:"bytes,1,opt,name=redundancy,proto3" json:"redundancy,omitempty"` + RootPieceId PieceID `protobuf:"bytes,2,opt,name=root_piece_id,json=rootPieceId,proto3,customtype=PieceID" json:"root_piece_id"` + RemotePieces []*RemotePiece `protobuf:"bytes,3,rep,name=remote_pieces,json=remotePieces,proto3" json:"remote_pieces,omitempty"` + MerkleRoot []byte `protobuf:"bytes,4,opt,name=merkle_root,json=merkleRoot,proto3" json:"merkle_root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoteSegment) Reset() { *m = RemoteSegment{} } +func (m *RemoteSegment) String() string { return proto.CompactTextString(m) } +func (*RemoteSegment) ProtoMessage() {} +func (*RemoteSegment) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{2} +} +func (m *RemoteSegment) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RemoteSegment.Unmarshal(m, b) +} +func (m *RemoteSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RemoteSegment.Marshal(b, m, deterministic) +} +func (m *RemoteSegment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoteSegment.Merge(m, src) +} +func (m *RemoteSegment) XXX_Size() int { + return xxx_messageInfo_RemoteSegment.Size(m) +} +func (m *RemoteSegment) XXX_DiscardUnknown() { + xxx_messageInfo_RemoteSegment.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoteSegment proto.InternalMessageInfo + +func (m *RemoteSegment) GetRedundancy() *RedundancyScheme { + if m != nil { + return m.Redundancy + } + return nil +} + +func (m *RemoteSegment) GetRemotePieces() []*RemotePiece { + if m != nil { + return m.RemotePieces + } + return nil +} + +func (m *RemoteSegment) GetMerkleRoot() []byte { + if m != nil { + return m.MerkleRoot + } + return nil +} + +type Pointer struct { + Type Pointer_DataType `protobuf:"varint,1,opt,name=type,proto3,enum=pointerdb.Pointer_DataType" json:"type,omitempty"` + 
InlineSegment []byte `protobuf:"bytes,3,opt,name=inline_segment,json=inlineSegment,proto3" json:"inline_segment,omitempty"` + Remote *RemoteSegment `protobuf:"bytes,4,opt,name=remote,proto3" json:"remote,omitempty"` + SegmentSize int64 `protobuf:"varint,5,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"` + CreationDate time.Time `protobuf:"bytes,6,opt,name=creation_date,json=creationDate,proto3,stdtime" json:"creation_date"` + ExpirationDate time.Time `protobuf:"bytes,7,opt,name=expiration_date,json=expirationDate,proto3,stdtime" json:"expiration_date"` + Metadata []byte `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"` + LastRepaired time.Time `protobuf:"bytes,9,opt,name=last_repaired,json=lastRepaired,proto3,stdtime" json:"last_repaired"` + RepairCount int32 `protobuf:"varint,10,opt,name=repair_count,json=repairCount,proto3" json:"repair_count,omitempty"` + PieceHashesVerified bool `protobuf:"varint,11,opt,name=piece_hashes_verified,json=pieceHashesVerified,proto3" json:"piece_hashes_verified,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Pointer) Reset() { *m = Pointer{} } +func (m *Pointer) String() string { return proto.CompactTextString(m) } +func (*Pointer) ProtoMessage() {} +func (*Pointer) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{3} +} +func (m *Pointer) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Pointer.Unmarshal(m, b) +} +func (m *Pointer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Pointer.Marshal(b, m, deterministic) +} +func (m *Pointer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pointer.Merge(m, src) +} +func (m *Pointer) XXX_Size() int { + return xxx_messageInfo_Pointer.Size(m) +} +func (m *Pointer) XXX_DiscardUnknown() { + xxx_messageInfo_Pointer.DiscardUnknown(m) +} + +var xxx_messageInfo_Pointer 
proto.InternalMessageInfo + +func (m *Pointer) GetType() Pointer_DataType { + if m != nil { + return m.Type + } + return Pointer_INLINE +} + +func (m *Pointer) GetInlineSegment() []byte { + if m != nil { + return m.InlineSegment + } + return nil +} + +func (m *Pointer) GetRemote() *RemoteSegment { + if m != nil { + return m.Remote + } + return nil +} + +func (m *Pointer) GetSegmentSize() int64 { + if m != nil { + return m.SegmentSize + } + return 0 +} + +func (m *Pointer) GetCreationDate() time.Time { + if m != nil { + return m.CreationDate + } + return time.Time{} +} + +func (m *Pointer) GetExpirationDate() time.Time { + if m != nil { + return m.ExpirationDate + } + return time.Time{} +} + +func (m *Pointer) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Pointer) GetLastRepaired() time.Time { + if m != nil { + return m.LastRepaired + } + return time.Time{} +} + +func (m *Pointer) GetRepairCount() int32 { + if m != nil { + return m.RepairCount + } + return 0 +} + +func (m *Pointer) GetPieceHashesVerified() bool { + if m != nil { + return m.PieceHashesVerified + } + return false +} + +// ListResponse is a response message for the List rpc call +type ListResponse struct { + Items []*ListResponse_Item `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + More bool `protobuf:"varint,2,opt,name=more,proto3" json:"more,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResponse) Reset() { *m = ListResponse{} } +func (m *ListResponse) String() string { return proto.CompactTextString(m) } +func (*ListResponse) ProtoMessage() {} +func (*ListResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{4} +} +func (m *ListResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResponse.Unmarshal(m, b) +} +func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) 
{ + return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic) +} +func (m *ListResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResponse.Merge(m, src) +} +func (m *ListResponse) XXX_Size() int { + return xxx_messageInfo_ListResponse.Size(m) +} +func (m *ListResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListResponse proto.InternalMessageInfo + +func (m *ListResponse) GetItems() []*ListResponse_Item { + if m != nil { + return m.Items + } + return nil +} + +func (m *ListResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +type ListResponse_Item struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Pointer *Pointer `protobuf:"bytes,2,opt,name=pointer,proto3" json:"pointer,omitempty"` + IsPrefix bool `protobuf:"varint,3,opt,name=is_prefix,json=isPrefix,proto3" json:"is_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListResponse_Item) Reset() { *m = ListResponse_Item{} } +func (m *ListResponse_Item) String() string { return proto.CompactTextString(m) } +func (*ListResponse_Item) ProtoMessage() {} +func (*ListResponse_Item) Descriptor() ([]byte, []int) { + return fileDescriptor_75fef806d28fc810, []int{4, 0} +} +func (m *ListResponse_Item) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListResponse_Item.Unmarshal(m, b) +} +func (m *ListResponse_Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListResponse_Item.Marshal(b, m, deterministic) +} +func (m *ListResponse_Item) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListResponse_Item.Merge(m, src) +} +func (m *ListResponse_Item) XXX_Size() int { + return xxx_messageInfo_ListResponse_Item.Size(m) +} +func (m *ListResponse_Item) XXX_DiscardUnknown() { + xxx_messageInfo_ListResponse_Item.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ListResponse_Item proto.InternalMessageInfo + +func (m *ListResponse_Item) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *ListResponse_Item) GetPointer() *Pointer { + if m != nil { + return m.Pointer + } + return nil +} + +func (m *ListResponse_Item) GetIsPrefix() bool { + if m != nil { + return m.IsPrefix + } + return false +} + +func init() { + proto.RegisterEnum("pointerdb.RedundancyScheme_SchemeType", RedundancyScheme_SchemeType_name, RedundancyScheme_SchemeType_value) + proto.RegisterEnum("pointerdb.Pointer_DataType", Pointer_DataType_name, Pointer_DataType_value) + proto.RegisterType((*RedundancyScheme)(nil), "pointerdb.RedundancyScheme") + proto.RegisterType((*RemotePiece)(nil), "pointerdb.RemotePiece") + proto.RegisterType((*RemoteSegment)(nil), "pointerdb.RemoteSegment") + proto.RegisterType((*Pointer)(nil), "pointerdb.Pointer") + proto.RegisterType((*ListResponse)(nil), "pointerdb.ListResponse") + proto.RegisterType((*ListResponse_Item)(nil), "pointerdb.ListResponse.Item") +} + +func init() { proto.RegisterFile("pointerdb.proto", fileDescriptor_75fef806d28fc810) } + +var fileDescriptor_75fef806d28fc810 = []byte{ + // 810 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x36, 0x6d, 0xfd, 0xd0, 0x43, 0xc9, 0x56, 0xb6, 0x69, 0x4b, 0x28, 0x05, 0xe4, 0x08, 0x48, + 0xeb, 0xa2, 0x01, 0x55, 0x30, 0xb7, 0xe6, 0x54, 0x57, 0x06, 0x4a, 0xc0, 0x51, 0x8d, 0x95, 0x91, + 0x43, 0x2f, 0xc4, 0x4a, 0x1c, 0x4b, 0xdb, 0x6a, 0xb9, 0xcc, 0xee, 0xaa, 0x88, 0xfd, 0x14, 0x7d, + 0x8a, 0x3e, 0x40, 0xef, 0xbd, 0xf7, 0x19, 0x7a, 0x48, 0x5e, 0xa5, 0xd8, 0x5d, 0xea, 0x27, 0x0d, + 0x50, 0xc0, 0x17, 0x7b, 0x67, 0xe6, 0x9b, 0x1f, 0x7e, 0xf3, 0x8d, 0xe0, 0xb4, 0x92, 0xbc, 0x34, + 0xa8, 0x8a, 0x59, 0x52, 0x29, 0x69, 0x24, 0x39, 0xde, 0x3a, 0xfa, 0x83, 0x85, 0x94, 0x8b, 0x15, + 0x8e, 0x5c, 0x60, 0xb6, 0xbe, 0x1d, 0x19, 0x2e, 0x50, 0x1b, 0x26, 0x2a, 
0x8f, 0xed, 0xc3, 0x42, + 0x2e, 0x64, 0xfd, 0xee, 0x48, 0x55, 0xa0, 0xd2, 0xde, 0x1a, 0xfe, 0x71, 0x08, 0x3d, 0x8a, 0xc5, + 0xba, 0x2c, 0x58, 0x39, 0xbf, 0x9b, 0xce, 0x97, 0x28, 0x90, 0x7c, 0x07, 0x0d, 0x73, 0x57, 0x61, + 0x1c, 0x9c, 0x05, 0xe7, 0x27, 0xe9, 0x97, 0xc9, 0xae, 0xf5, 0x7f, 0xa1, 0x89, 0xff, 0x77, 0x73, + 0x57, 0x21, 0x75, 0x39, 0xe4, 0x73, 0x68, 0x0b, 0x5e, 0xe6, 0x0a, 0xdf, 0xc4, 0x87, 0x67, 0xc1, + 0x79, 0x93, 0xb6, 0x04, 0x2f, 0x29, 0xbe, 0x21, 0x8f, 0xa1, 0x69, 0xa4, 0x61, 0xab, 0xf8, 0xc8, + 0xb9, 0xbd, 0x41, 0xbe, 0x86, 0x9e, 0xc2, 0x8a, 0x71, 0x95, 0x9b, 0xa5, 0x42, 0xbd, 0x94, 0xab, + 0x22, 0x6e, 0x38, 0xc0, 0xa9, 0xf7, 0xdf, 0x6c, 0xdc, 0xe4, 0x1b, 0x78, 0xa4, 0xd7, 0xf3, 0x39, + 0x6a, 0xbd, 0x87, 0x6d, 0x3a, 0x6c, 0xaf, 0x0e, 0xec, 0xc0, 0xcf, 0x81, 0xa0, 0x62, 0x7a, 0xad, + 0x30, 0xd7, 0x4b, 0x66, 0xff, 0xf2, 0x7b, 0x8c, 0x5b, 0x1e, 0x5d, 0x47, 0xa6, 0x36, 0x30, 0xe5, + 0xf7, 0x38, 0x7c, 0x0a, 0xb0, 0xfb, 0x10, 0x12, 0x41, 0x3b, 0x9b, 0xbc, 0xfe, 0xfe, 0x2a, 0x1b, + 0xf7, 0x0e, 0x48, 0x0b, 0x0e, 0xe9, 0xb4, 0x17, 0x0c, 0xef, 0x21, 0xa2, 0x28, 0xa4, 0xc1, 0x6b, + 0x8e, 0x73, 0x24, 0x4f, 0xe0, 0xb8, 0xb2, 0x8f, 0xbc, 0x5c, 0x0b, 0xc7, 0x53, 0x93, 0x86, 0xce, + 0x31, 0x59, 0x0b, 0xf2, 0x15, 0xb4, 0x4b, 0x59, 0x60, 0xce, 0x0b, 0xc7, 0x41, 0xe7, 0xe2, 0xe4, + 0xef, 0x77, 0x83, 0x83, 0x7f, 0xde, 0x0d, 0x5a, 0x13, 0x59, 0x60, 0x36, 0xa6, 0x2d, 0x1b, 0xce, + 0x0a, 0xf2, 0x0c, 0x1a, 0x4b, 0xa6, 0x97, 0x8e, 0x92, 0x28, 0x7d, 0x94, 0xd4, 0xab, 0x71, 0x2d, + 0x7e, 0x64, 0x7a, 0x49, 0x5d, 0x78, 0xf8, 0x3e, 0x80, 0xae, 0x6f, 0x3e, 0xc5, 0x85, 0xc0, 0xd2, + 0x90, 0x97, 0x00, 0x6a, 0xbb, 0x0a, 0xd7, 0x3f, 0x4a, 0x9f, 0xfc, 0xcf, 0x9e, 0xe8, 0x1e, 0x9c, + 0xbc, 0x80, 0xae, 0x92, 0xd2, 0xe4, 0xfe, 0x03, 0xb6, 0x43, 0x9e, 0xd6, 0x43, 0xb6, 0x5d, 0xfb, + 0x6c, 0x4c, 0x23, 0x8b, 0xf2, 0x46, 0x41, 0x5e, 0x42, 0x57, 0xb9, 0x11, 0x7c, 0x9a, 0x8e, 0x8f, + 0xce, 0x8e, 0xce, 0xa3, 0xf4, 0xb3, 0x0f, 0x9a, 0x6e, 0xf9, 0xa1, 0x1d, 0xb5, 0x33, 0x34, 0x19, + 0x40, 0x24, 
0x50, 0xfd, 0xba, 0xc2, 0xdc, 0x96, 0x74, 0x0b, 0xee, 0x50, 0xf0, 0x2e, 0x2a, 0xa5, + 0x19, 0xfe, 0xd9, 0x80, 0xf6, 0xb5, 0x2f, 0x44, 0x46, 0x1f, 0xa8, 0x6f, 0xff, 0xab, 0x6a, 0x44, + 0x32, 0x66, 0x86, 0xed, 0x49, 0xee, 0x19, 0x9c, 0xf0, 0x72, 0xc5, 0x4b, 0xcc, 0xb5, 0xa7, 0xc7, + 0xf1, 0xd9, 0xa1, 0x5d, 0xef, 0xdd, 0x70, 0xf6, 0x2d, 0xb4, 0xfc, 0x50, 0xae, 0x7f, 0x94, 0xc6, + 0x1f, 0x8d, 0x5e, 0x23, 0x69, 0x8d, 0x23, 0x4f, 0xa1, 0x53, 0x57, 0xf4, 0xf2, 0xb1, 0x62, 0x3b, + 0xa2, 0x51, 0xed, 0xb3, 0xca, 0x21, 0x19, 0x74, 0xe7, 0x0a, 0x99, 0xe1, 0xb2, 0xcc, 0x0b, 0x66, + 0xbc, 0xc4, 0xa2, 0xb4, 0x9f, 0xf8, 0x93, 0x4c, 0x36, 0x27, 0x99, 0xdc, 0x6c, 0x4e, 0xf2, 0x22, + 0xb4, 0x3c, 0xff, 0xfe, 0x7e, 0x10, 0xd0, 0xce, 0x26, 0x75, 0xcc, 0x0c, 0x92, 0x57, 0x70, 0x8a, + 0x6f, 0x2b, 0xae, 0xf6, 0x8a, 0xb5, 0x1f, 0x50, 0xec, 0x64, 0x97, 0xec, 0xca, 0xf5, 0x21, 0x14, + 0x68, 0x58, 0xc1, 0x0c, 0x8b, 0x43, 0xc7, 0xc7, 0xd6, 0xb6, 0x53, 0xaf, 0x98, 0x36, 0xb9, 0x3f, + 0x31, 0x2c, 0xe2, 0xe3, 0x87, 0x4c, 0x6d, 0x53, 0x69, 0x9d, 0x69, 0x39, 0xaa, 0x0f, 0x78, 0x2e, + 0xd7, 0xa5, 0x89, 0xc1, 0xdd, 0x42, 0xe4, 0x7d, 0x3f, 0x58, 0x17, 0x49, 0xe1, 0x53, 0x2f, 0x35, + 0x2b, 0x66, 0xd4, 0xf9, 0x6f, 0xa8, 0xf8, 0x2d, 0xc7, 0x22, 0x8e, 0xce, 0x82, 0xf3, 0x90, 0x7e, + 0x52, 0x6d, 0xe4, 0x8e, 0xfa, 0x75, 0x1d, 0x1a, 0x0e, 0x21, 0xdc, 0x6c, 0x99, 0x00, 0xb4, 0xb2, + 0xc9, 0x55, 0x36, 0xb9, 0xec, 0x1d, 0xd8, 0x37, 0xbd, 0x7c, 0xf5, 0xd3, 0xcd, 0x65, 0x2f, 0x18, + 0xfe, 0x15, 0x40, 0xe7, 0x8a, 0xdb, 0x59, 0x74, 0x25, 0x4b, 0x8d, 0x24, 0x85, 0x26, 0x37, 0x28, + 0x74, 0x1c, 0x38, 0x6d, 0x7e, 0xb1, 0xb7, 0xe0, 0x7d, 0x5c, 0x92, 0x19, 0x14, 0xd4, 0x43, 0x09, + 0x81, 0x86, 0x90, 0x0a, 0xdd, 0x0d, 0x84, 0xd4, 0xbd, 0xfb, 0x08, 0x0d, 0x0b, 0xb1, 0xb1, 0x8a, + 0x99, 0xa5, 0x53, 0xe2, 0x31, 0x75, 0x6f, 0xf2, 0x1c, 0xda, 0x75, 0x55, 0x97, 0x12, 0xa5, 0xe4, + 0x63, 0x81, 0xd2, 0x0d, 0xc4, 0xfe, 0x4c, 0x70, 0x9d, 0x57, 0x0a, 0x6f, 0xf9, 0x5b, 0xa7, 0xca, + 0x90, 0x86, 0x5c, 0x5f, 0x3b, 0xfb, 0xe2, 0xf1, 
0xcf, 0x44, 0x1b, 0xa9, 0x7e, 0x49, 0xb8, 0x1c, + 0xcd, 0xa5, 0x10, 0xb2, 0x1c, 0x55, 0xb3, 0x59, 0xcb, 0x91, 0xff, 0xe2, 0xdf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xbf, 0x28, 0x12, 0xfc, 0xf1, 0x05, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/pointerdb.proto b/vendor/storj.io/common/pb/pointerdb.proto new file mode 100644 index 000000000..c0896f231 --- /dev/null +++ b/vendor/storj.io/common/pb/pointerdb.proto @@ -0,0 +1,74 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package pointerdb; + +import "google/protobuf/timestamp.proto"; +import "gogo.proto"; +import "orders.proto"; + +message RedundancyScheme { + enum SchemeType { + INVALID = 0; + RS = 1; + } + SchemeType type = 1; + + // these values apply to RS encoding + int32 min_req = 2; // minimum required for reconstruction + int32 total = 3; // total amount of pieces we generated + int32 repair_threshold = 4; // amount of pieces we need to drop to before triggering repair + int32 success_threshold = 5; // amount of pieces we need to store to call it a success + + int32 erasure_share_size = 6; +} + +message RemotePiece { + int32 piece_num = 1; + bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + orders.PieceHash hash = 3; +} + +message RemoteSegment { + RedundancyScheme redundancy = 1; + bytes root_piece_id = 2 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false]; + repeated RemotePiece remote_pieces = 3; + bytes merkle_root = 4; // root hash of the hashes of all of these pieces +} + +message Pointer { + enum DataType { + INLINE = 0; + REMOTE = 1; + } + + DataType type = 1; + + bytes inline_segment = 3; + RemoteSegment remote = 4; + int64 segment_size = 5; + + google.protobuf.Timestamp creation_date = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp expiration_date = 7 [(gogoproto.stdtime) = true, 
(gogoproto.nullable) = false]; + + bytes metadata = 8; + + google.protobuf.Timestamp last_repaired = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + int32 repair_count = 10; + bool piece_hashes_verified = 11; +} + +// ListResponse is a response message for the List rpc call +message ListResponse { + message Item { + string path = 1; + Pointer pointer = 2; + bool is_prefix = 3; + } + + repeated Item items = 1; + bool more = 2; +} diff --git a/vendor/storj.io/common/pb/referralmanager.pb.go b/vendor/storj.io/common/pb/referralmanager.pb.go new file mode 100644 index 000000000..12c05a60c --- /dev/null +++ b/vendor/storj.io/common/pb/referralmanager.pb.go @@ -0,0 +1,326 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: referralmanager.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type GetTokensRequest struct { + OwnerUserId []byte `protobuf:"bytes,1,opt,name=owner_user_id,json=ownerUserId,proto3" json:"owner_user_id,omitempty"` + OwnerSatelliteId NodeID `protobuf:"bytes,2,opt,name=owner_satellite_id,json=ownerSatelliteId,proto3,customtype=NodeID" json:"owner_satellite_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTokensRequest) Reset() { *m = GetTokensRequest{} } +func (m *GetTokensRequest) String() string { return proto.CompactTextString(m) } +func (*GetTokensRequest) ProtoMessage() {} +func (*GetTokensRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_45d96ad24f1e021c, []int{0} +} +func (m *GetTokensRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTokensRequest.Unmarshal(m, b) +} +func (m *GetTokensRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTokensRequest.Marshal(b, m, deterministic) +} +func (m *GetTokensRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTokensRequest.Merge(m, src) +} +func (m *GetTokensRequest) XXX_Size() int { + return xxx_messageInfo_GetTokensRequest.Size(m) +} +func (m *GetTokensRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTokensRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTokensRequest proto.InternalMessageInfo + +func (m *GetTokensRequest) GetOwnerUserId() []byte { + if m != nil { + return m.OwnerUserId + } + return nil +} + +type GetTokensResponse struct { + TokenSecrets [][]byte `protobuf:"bytes,1,rep,name=token_secrets,json=tokenSecrets,proto3" json:"token_secrets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetTokensResponse) Reset() { *m = GetTokensResponse{} } +func (m *GetTokensResponse) String() string { return 
proto.CompactTextString(m) } +func (*GetTokensResponse) ProtoMessage() {} +func (*GetTokensResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_45d96ad24f1e021c, []int{1} +} +func (m *GetTokensResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetTokensResponse.Unmarshal(m, b) +} +func (m *GetTokensResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetTokensResponse.Marshal(b, m, deterministic) +} +func (m *GetTokensResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTokensResponse.Merge(m, src) +} +func (m *GetTokensResponse) XXX_Size() int { + return xxx_messageInfo_GetTokensResponse.Size(m) +} +func (m *GetTokensResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTokensResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTokensResponse proto.InternalMessageInfo + +func (m *GetTokensResponse) GetTokenSecrets() [][]byte { + if m != nil { + return m.TokenSecrets + } + return nil +} + +type RedeemTokenRequest struct { + Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + RedeemUserId []byte `protobuf:"bytes,2,opt,name=redeem_user_id,json=redeemUserId,proto3" json:"redeem_user_id,omitempty"` + RedeemSatelliteId NodeID `protobuf:"bytes,3,opt,name=redeem_satellite_id,json=redeemSatelliteId,proto3,customtype=NodeID" json:"redeem_satellite_id"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RedeemTokenRequest) Reset() { *m = RedeemTokenRequest{} } +func (m *RedeemTokenRequest) String() string { return proto.CompactTextString(m) } +func (*RedeemTokenRequest) ProtoMessage() {} +func (*RedeemTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_45d96ad24f1e021c, []int{2} +} +func (m *RedeemTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RedeemTokenRequest.Unmarshal(m, b) +} +func (m *RedeemTokenRequest) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + return xxx_messageInfo_RedeemTokenRequest.Marshal(b, m, deterministic) +} +func (m *RedeemTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedeemTokenRequest.Merge(m, src) +} +func (m *RedeemTokenRequest) XXX_Size() int { + return xxx_messageInfo_RedeemTokenRequest.Size(m) +} +func (m *RedeemTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RedeemTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RedeemTokenRequest proto.InternalMessageInfo + +func (m *RedeemTokenRequest) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *RedeemTokenRequest) GetRedeemUserId() []byte { + if m != nil { + return m.RedeemUserId + } + return nil +} + +type RedeemTokenResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RedeemTokenResponse) Reset() { *m = RedeemTokenResponse{} } +func (m *RedeemTokenResponse) String() string { return proto.CompactTextString(m) } +func (*RedeemTokenResponse) ProtoMessage() {} +func (*RedeemTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_45d96ad24f1e021c, []int{3} +} +func (m *RedeemTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RedeemTokenResponse.Unmarshal(m, b) +} +func (m *RedeemTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RedeemTokenResponse.Marshal(b, m, deterministic) +} +func (m *RedeemTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedeemTokenResponse.Merge(m, src) +} +func (m *RedeemTokenResponse) XXX_Size() int { + return xxx_messageInfo_RedeemTokenResponse.Size(m) +} +func (m *RedeemTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RedeemTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RedeemTokenResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*GetTokensRequest)(nil), "referralmanager.GetTokensRequest") + 
proto.RegisterType((*GetTokensResponse)(nil), "referralmanager.GetTokensResponse") + proto.RegisterType((*RedeemTokenRequest)(nil), "referralmanager.RedeemTokenRequest") + proto.RegisterType((*RedeemTokenResponse)(nil), "referralmanager.RedeemTokenResponse") +} + +func init() { proto.RegisterFile("referralmanager.proto", fileDescriptor_45d96ad24f1e021c) } + +var fileDescriptor_45d96ad24f1e021c = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0xbf, 0x7e, 0x44, 0x12, 0x2f, 0xe5, 0xdf, 0x00, 0x09, 0x61, 0x03, 0x16, 0x16, 0xac, + 0x20, 0xd1, 0x8d, 0x0b, 0xe3, 0x82, 0x98, 0x18, 0x16, 0xba, 0x18, 0x34, 0x31, 0x6e, 0x48, 0xa1, + 0x57, 0x52, 0x6d, 0x7b, 0xeb, 0xcc, 0x10, 0x5f, 0xc3, 0x37, 0x72, 0xeb, 0x33, 0xb8, 0xe0, 0x59, + 0x4c, 0x67, 0x0a, 0x29, 0xc5, 0xb0, 0xec, 0xe9, 0xef, 0xf6, 0x9e, 0x73, 0x6e, 0xa1, 0x25, 0xf0, + 0x05, 0x85, 0x70, 0x83, 0xd0, 0x8d, 0xdc, 0x15, 0x8a, 0x51, 0x2c, 0x48, 0x11, 0xab, 0xe6, 0xe4, + 0x0e, 0xac, 0x68, 0x45, 0xe6, 0xa5, 0xa3, 0xa0, 0x76, 0x8b, 0xea, 0x81, 0xde, 0x30, 0x92, 0x1c, + 0xdf, 0xd7, 0x28, 0x15, 0x73, 0xa0, 0x4c, 0x1f, 0x11, 0x8a, 0xf9, 0x5a, 0xa2, 0x98, 0xfb, 0x5e, + 0xdb, 0xea, 0x59, 0x43, 0x9b, 0x97, 0xb4, 0xf8, 0x28, 0x51, 0x4c, 0x3d, 0x76, 0x05, 0xcc, 0x30, + 0xd2, 0x55, 0x18, 0x04, 0xbe, 0xc2, 0x04, 0xfc, 0x9f, 0x80, 0x93, 0xca, 0xf7, 0xa6, 0xfb, 0xef, + 0x67, 0xd3, 0x2d, 0xde, 0x93, 0x87, 0xd3, 0x1b, 0x5e, 0xd3, 0xe4, 0x6c, 0x0b, 0x4e, 0x3d, 0xe7, + 0x12, 0xea, 0x99, 0xad, 0x32, 0xa6, 0x48, 0x22, 0xeb, 0x43, 0x59, 0x25, 0xca, 0x5c, 0xe2, 0x52, + 0xa0, 0x92, 0x6d, 0xab, 0x57, 0x18, 0xda, 0xdc, 0xd6, 0xe2, 0xcc, 0x68, 0xce, 0xa7, 0x05, 0x8c, + 0xa3, 0x87, 0x18, 0xea, 0xe9, 0xad, 0xe5, 0x26, 0x9c, 0x68, 0x2c, 0xb5, 0x6a, 0x1e, 0xd8, 0x00, + 0x2a, 0x42, 0xb3, 0xbb, 0x24, 0xda, 0x20, 0xb7, 0x8d, 0x9a, 0x46, 0xb9, 0x86, 0x46, 0x4a, 0xed, + 0x65, 0x29, 0xfc, 0x99, 0xa5, 0x6e, 0xd0, 0x6c, 0x98, 0x16, 0x34, 0xf6, 
0x1c, 0x99, 0x38, 0xe7, + 0x5f, 0x16, 0x54, 0x79, 0xda, 0xfc, 0x9d, 0x69, 0x9e, 0x71, 0x38, 0xdd, 0xe5, 0x66, 0x67, 0xa3, + 0xfc, 0xbd, 0xf2, 0x97, 0xe8, 0x38, 0xc7, 0x90, 0xb4, 0xb6, 0x27, 0x28, 0x65, 0xd6, 0xb3, 0xfe, + 0xc1, 0xc8, 0x61, 0x5d, 0x9d, 0xc1, 0x71, 0xc8, 0x7c, 0x79, 0xd2, 0x7c, 0x66, 0x52, 0x91, 0x78, + 0x1d, 0xf9, 0x34, 0x5e, 0x52, 0x18, 0x52, 0x34, 0x8e, 0x17, 0x8b, 0xa2, 0xfe, 0x71, 0x2e, 0x7e, + 0x03, 0x00, 0x00, 0xff, 0xff, 0x85, 0x79, 0x33, 0xba, 0x6e, 0x02, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCReferralManagerClient interface { + DRPCConn() drpc.Conn + + // GetTokens retrieves a list of unredeemed tokens for a user + GetTokens(ctx context.Context, in *GetTokensRequest) (*GetTokensResponse, error) + // RedeemToken saves newly created user info in referral manager + RedeemToken(ctx context.Context, in *RedeemTokenRequest) (*RedeemTokenResponse, error) +} + +type drpcReferralManagerClient struct { + cc drpc.Conn +} + +func NewDRPCReferralManagerClient(cc drpc.Conn) DRPCReferralManagerClient { + return &drpcReferralManagerClient{cc} +} + +func (c *drpcReferralManagerClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcReferralManagerClient) GetTokens(ctx context.Context, in *GetTokensRequest) (*GetTokensResponse, error) { + out := new(GetTokensResponse) + err := c.cc.Invoke(ctx, "/referralmanager.ReferralManager/GetTokens", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *drpcReferralManagerClient) RedeemToken(ctx context.Context, in *RedeemTokenRequest) (*RedeemTokenResponse, error) { + out := new(RedeemTokenResponse) + err := c.cc.Invoke(ctx, "/referralmanager.ReferralManager/RedeemToken", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCReferralManagerServer interface { + // GetTokens retrieves a list of unredeemed tokens for a user + GetTokens(context.Context, *GetTokensRequest) (*GetTokensResponse, error) + // RedeemToken saves newly created user info in 
referral manager + RedeemToken(context.Context, *RedeemTokenRequest) (*RedeemTokenResponse, error) +} + +type DRPCReferralManagerDescription struct{} + +func (DRPCReferralManagerDescription) NumMethods() int { return 2 } + +func (DRPCReferralManagerDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/referralmanager.ReferralManager/GetTokens", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCReferralManagerServer). + GetTokens( + ctx, + in1.(*GetTokensRequest), + ) + }, DRPCReferralManagerServer.GetTokens, true + case 1: + return "/referralmanager.ReferralManager/RedeemToken", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCReferralManagerServer). + RedeemToken( + ctx, + in1.(*RedeemTokenRequest), + ) + }, DRPCReferralManagerServer.RedeemToken, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterReferralManager(mux drpc.Mux, impl DRPCReferralManagerServer) error { + return mux.Register(impl, DRPCReferralManagerDescription{}) +} + +type DRPCReferralManager_GetTokensStream interface { + drpc.Stream + SendAndClose(*GetTokensResponse) error +} + +type drpcReferralManagerGetTokensStream struct { + drpc.Stream +} + +func (x *drpcReferralManagerGetTokensStream) SendAndClose(m *GetTokensResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +type DRPCReferralManager_RedeemTokenStream interface { + drpc.Stream + SendAndClose(*RedeemTokenResponse) error +} + +type drpcReferralManagerRedeemTokenStream struct { + drpc.Stream +} + +func (x *drpcReferralManagerRedeemTokenStream) SendAndClose(m *RedeemTokenResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/referralmanager.proto b/vendor/storj.io/common/pb/referralmanager.proto new 
file mode 100644 index 000000000..96230624b --- /dev/null +++ b/vendor/storj.io/common/pb/referralmanager.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +import "gogo.proto"; + +package referralmanager; + +// ReferralManager is a service for handling referrals +service ReferralManager { + // GetTokens retrieves a list of unredeemed tokens for a user + rpc GetTokens(GetTokensRequest) returns (GetTokensResponse); + // RedeemToken saves newly created user info in referral manager + rpc RedeemToken(RedeemTokenRequest) returns (RedeemTokenResponse); +} + +message GetTokensRequest{ + bytes owner_user_id = 1; + bytes owner_satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; +} + +message GetTokensResponse { + repeated bytes token_secrets = 1; +} + +message RedeemTokenRequest { + bytes token = 1; + bytes redeem_user_id = 2; + bytes redeem_satellite_id = 3 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; +} + +message RedeemTokenResponse {} diff --git a/vendor/storj.io/common/pb/scannerValuer.go b/vendor/storj.io/common/pb/scannerValuer.go new file mode 100644 index 000000000..258d790c4 --- /dev/null +++ b/vendor/storj.io/common/pb/scannerValuer.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package pb + +import ( + "database/sql/driver" + + proto "github.com/gogo/protobuf/proto" + "github.com/zeebo/errs" +) + +var scanError = errs.Class("Protobuf Scanner") +var valueError = errs.Class("Protobuf Valuer") + +//scan automatically converts database []byte to proto.Messages +func scan(msg proto.Message, value interface{}) error { + bytes, ok := value.([]byte) + if !ok { + return scanError.New("%t was %t, expected []bytes", msg, value) + } + return scanError.Wrap(Unmarshal(bytes, msg)) +} + +//value automatically converts proto.Messages to database []byte +func value(msg proto.Message) (driver.Value, error) { + value, err := Marshal(msg) + return value, valueError.Wrap(err) +} + +// Scan implements the Scanner interface. +func (n *InjuredSegment) Scan(value interface{}) error { + return scan(n, value) +} + +// Value implements the driver Valuer interface. +func (n InjuredSegment) Value() (driver.Value, error) { + return value(&n) +} diff --git a/vendor/storj.io/common/pb/scope.pb.go b/vendor/storj.io/common/pb/scope.pb.go new file mode 100644 index 000000000..0073476b2 --- /dev/null +++ b/vendor/storj.io/common/pb/scope.pb.go @@ -0,0 +1,98 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: scope.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Scope struct { + SatelliteAddr string `protobuf:"bytes,1,opt,name=satellite_addr,json=satelliteAddr,proto3" json:"satellite_addr,omitempty"` + ApiKey []byte `protobuf:"bytes,2,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"` + EncryptionAccess *EncryptionAccess `protobuf:"bytes,3,opt,name=encryption_access,json=encryptionAccess,proto3" json:"encryption_access,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Scope) Reset() { *m = Scope{} } +func (m *Scope) String() string { return proto.CompactTextString(m) } +func (*Scope) ProtoMessage() {} +func (*Scope) Descriptor() ([]byte, []int) { + return fileDescriptor_c67276d5d71daf81, []int{0} +} +func (m *Scope) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Scope.Unmarshal(m, b) +} +func (m *Scope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Scope.Marshal(b, m, deterministic) +} +func (m *Scope) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scope.Merge(m, src) +} +func (m *Scope) XXX_Size() int { + return xxx_messageInfo_Scope.Size(m) +} +func (m *Scope) XXX_DiscardUnknown() { + xxx_messageInfo_Scope.DiscardUnknown(m) +} + +var xxx_messageInfo_Scope proto.InternalMessageInfo + +func (m *Scope) GetSatelliteAddr() string { + if m != nil { + return m.SatelliteAddr + } + return "" +} + +func (m *Scope) GetApiKey() []byte { + if m != nil { + return m.ApiKey + } + return nil +} + +func (m *Scope) GetEncryptionAccess() *EncryptionAccess { + if m != nil { + return m.EncryptionAccess + } + return nil +} + +func init() { + proto.RegisterType((*Scope)(nil), "scope.Scope") +} + +func init() { proto.RegisterFile("scope.proto", fileDescriptor_c67276d5d71daf81) } + +var fileDescriptor_c67276d5d71daf81 = []byte{ + // 182 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0x4e, 0xce, 0x2f, + 0x48, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x73, 0xa4, 0xc4, 0x53, 0xf3, 0x92, + 0x8b, 0x2a, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xe2, 0x13, 0x93, 0x93, 0x53, 0x8b, 0x8b, 0x21, 0xf2, + 0x4a, 0x33, 0x19, 0xb9, 0x58, 0x83, 0x41, 0x4a, 0x84, 0x54, 0xb9, 0xf8, 0x8a, 0x13, 0x4b, 0x52, + 0x73, 0x72, 0x32, 0x4b, 0x52, 0xe3, 0x13, 0x53, 0x52, 0x8a, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, + 0x83, 0x78, 0xe1, 0xa2, 0x8e, 0x29, 0x29, 0x45, 0x42, 0xe2, 0x5c, 0xec, 0x89, 0x05, 0x99, 0xf1, + 0xd9, 0xa9, 0x95, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x6c, 0x89, 0x05, 0x99, 0xde, 0xa9, + 0x95, 0x42, 0x01, 0x5c, 0x82, 0x18, 0x96, 0x48, 0x30, 0x2b, 0x30, 0x6a, 0x70, 0x1b, 0x29, 0xeb, + 0x61, 0x5a, 0xef, 0x0a, 0x17, 0x71, 0x04, 0x0b, 0x04, 0x09, 0xa4, 0xa2, 0x89, 0x38, 0x89, 0x44, + 0x09, 0x15, 0x97, 0xe4, 0x17, 0x65, 0xe9, 0x65, 0xe6, 0xeb, 0x27, 0xe7, 0xe7, 0xe6, 0xe6, 0xe7, + 0xe9, 0x17, 0x24, 0x25, 0xb1, 0x81, 0x1d, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xab, 0xe3, + 0xb0, 0xca, 0xe7, 0x00, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/scope.proto b/vendor/storj.io/common/pb/scope.proto new file mode 100644 index 000000000..ebab90221 --- /dev/null +++ b/vendor/storj.io/common/pb/scope.proto @@ -0,0 +1,18 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; + +option go_package = "storj.io/common/pb"; + +package scope; + +import "encryption_access.proto"; + +message Scope { + string satellite_addr = 1; + + bytes api_key = 2; + + encryption_access.EncryptionAccess encryption_access = 3; +} diff --git a/vendor/storj.io/common/pb/streams.pb.go b/vendor/storj.io/common/pb/streams.pb.go new file mode 100644 index 000000000..5bf12979d --- /dev/null +++ b/vendor/storj.io/common/pb/streams.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: streams.proto + +package pb + +import ( + fmt "fmt" + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type SegmentMeta struct { + EncryptedKey []byte `protobuf:"bytes,1,opt,name=encrypted_key,json=encryptedKey,proto3" json:"encrypted_key,omitempty"` + KeyNonce []byte `protobuf:"bytes,2,opt,name=key_nonce,json=keyNonce,proto3" json:"key_nonce,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SegmentMeta) Reset() { *m = SegmentMeta{} } +func (m *SegmentMeta) String() string { return proto.CompactTextString(m) } +func (*SegmentMeta) ProtoMessage() {} +func (*SegmentMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_c6bbf8af0ec331d6, []int{0} +} +func (m *SegmentMeta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SegmentMeta.Unmarshal(m, b) +} +func (m *SegmentMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SegmentMeta.Marshal(b, m, deterministic) +} +func (m *SegmentMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_SegmentMeta.Merge(m, src) +} +func (m *SegmentMeta) XXX_Size() int { + return xxx_messageInfo_SegmentMeta.Size(m) +} +func (m *SegmentMeta) XXX_DiscardUnknown() { + xxx_messageInfo_SegmentMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_SegmentMeta proto.InternalMessageInfo + +func (m *SegmentMeta) GetEncryptedKey() []byte { + if m != nil { + return m.EncryptedKey + } + return nil +} + +func (m *SegmentMeta) 
GetKeyNonce() []byte { + if m != nil { + return m.KeyNonce + } + return nil +} + +type StreamInfo struct { + DeprecatedNumberOfSegments int64 `protobuf:"varint,1,opt,name=deprecated_number_of_segments,json=deprecatedNumberOfSegments,proto3" json:"deprecated_number_of_segments,omitempty"` + SegmentsSize int64 `protobuf:"varint,2,opt,name=segments_size,json=segmentsSize,proto3" json:"segments_size,omitempty"` + LastSegmentSize int64 `protobuf:"varint,3,opt,name=last_segment_size,json=lastSegmentSize,proto3" json:"last_segment_size,omitempty"` + Metadata []byte `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamInfo) Reset() { *m = StreamInfo{} } +func (m *StreamInfo) String() string { return proto.CompactTextString(m) } +func (*StreamInfo) ProtoMessage() {} +func (*StreamInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_c6bbf8af0ec331d6, []int{1} +} +func (m *StreamInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamInfo.Unmarshal(m, b) +} +func (m *StreamInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamInfo.Marshal(b, m, deterministic) +} +func (m *StreamInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamInfo.Merge(m, src) +} +func (m *StreamInfo) XXX_Size() int { + return xxx_messageInfo_StreamInfo.Size(m) +} +func (m *StreamInfo) XXX_DiscardUnknown() { + xxx_messageInfo_StreamInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamInfo proto.InternalMessageInfo + +func (m *StreamInfo) GetDeprecatedNumberOfSegments() int64 { + if m != nil { + return m.DeprecatedNumberOfSegments + } + return 0 +} + +func (m *StreamInfo) GetSegmentsSize() int64 { + if m != nil { + return m.SegmentsSize + } + return 0 +} + +func (m *StreamInfo) GetLastSegmentSize() int64 { + if m != nil { + return m.LastSegmentSize + } + return 0 +} + +func (m 
*StreamInfo) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + +type StreamMeta struct { + EncryptedStreamInfo []byte `protobuf:"bytes,1,opt,name=encrypted_stream_info,json=encryptedStreamInfo,proto3" json:"encrypted_stream_info,omitempty"` + EncryptionType int32 `protobuf:"varint,2,opt,name=encryption_type,json=encryptionType,proto3" json:"encryption_type,omitempty"` + EncryptionBlockSize int32 `protobuf:"varint,3,opt,name=encryption_block_size,json=encryptionBlockSize,proto3" json:"encryption_block_size,omitempty"` + LastSegmentMeta *SegmentMeta `protobuf:"bytes,4,opt,name=last_segment_meta,json=lastSegmentMeta,proto3" json:"last_segment_meta,omitempty"` + NumberOfSegments int64 `protobuf:"varint,5,opt,name=number_of_segments,json=numberOfSegments,proto3" json:"number_of_segments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamMeta) Reset() { *m = StreamMeta{} } +func (m *StreamMeta) String() string { return proto.CompactTextString(m) } +func (*StreamMeta) ProtoMessage() {} +func (*StreamMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_c6bbf8af0ec331d6, []int{2} +} +func (m *StreamMeta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamMeta.Unmarshal(m, b) +} +func (m *StreamMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamMeta.Marshal(b, m, deterministic) +} +func (m *StreamMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamMeta.Merge(m, src) +} +func (m *StreamMeta) XXX_Size() int { + return xxx_messageInfo_StreamMeta.Size(m) +} +func (m *StreamMeta) XXX_DiscardUnknown() { + xxx_messageInfo_StreamMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamMeta proto.InternalMessageInfo + +func (m *StreamMeta) GetEncryptedStreamInfo() []byte { + if m != nil { + return m.EncryptedStreamInfo + } + return nil +} + +func (m *StreamMeta) GetEncryptionType() int32 
{ + if m != nil { + return m.EncryptionType + } + return 0 +} + +func (m *StreamMeta) GetEncryptionBlockSize() int32 { + if m != nil { + return m.EncryptionBlockSize + } + return 0 +} + +func (m *StreamMeta) GetLastSegmentMeta() *SegmentMeta { + if m != nil { + return m.LastSegmentMeta + } + return nil +} + +func (m *StreamMeta) GetNumberOfSegments() int64 { + if m != nil { + return m.NumberOfSegments + } + return 0 +} + +func init() { + proto.RegisterType((*SegmentMeta)(nil), "streams.SegmentMeta") + proto.RegisterType((*StreamInfo)(nil), "streams.StreamInfo") + proto.RegisterType((*StreamMeta)(nil), "streams.StreamMeta") +} + +func init() { proto.RegisterFile("streams.proto", fileDescriptor_c6bbf8af0ec331d6) } + +var fileDescriptor_c6bbf8af0ec331d6 = []byte{ + // 343 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x41, 0x4f, 0xf2, 0x40, + 0x10, 0x0d, 0xf0, 0xf1, 0x89, 0x0b, 0x88, 0xae, 0x98, 0x10, 0x8c, 0x89, 0xc1, 0x83, 0xc6, 0x18, + 0x48, 0xf0, 0x0f, 0x28, 0x37, 0x63, 0x84, 0xa4, 0x78, 0xf2, 0xb2, 0xd9, 0x96, 0xa9, 0xa9, 0xa5, + 0x3b, 0x4d, 0x77, 0x3d, 0x2c, 0xff, 0xc1, 0x7f, 0xe4, 0x8f, 0x33, 0x3b, 0x6d, 0x29, 0x12, 0x8f, + 0xfb, 0xe6, 0xe5, 0xcd, 0x7b, 0x6f, 0x96, 0x75, 0xb5, 0xc9, 0x40, 0x26, 0x7a, 0x9c, 0x66, 0x68, + 0x90, 0x1f, 0x14, 0xcf, 0xd1, 0x82, 0xb5, 0x97, 0xf0, 0x9e, 0x80, 0x32, 0x2f, 0x60, 0x24, 0xbf, + 0x62, 0x5d, 0x50, 0x41, 0x66, 0x53, 0x03, 0x2b, 0x11, 0x83, 0x1d, 0xd4, 0x2e, 0x6b, 0x37, 0x1d, + 0xaf, 0xb3, 0x05, 0x9f, 0xc1, 0xf2, 0x73, 0x76, 0x18, 0x83, 0x15, 0x0a, 0x55, 0x00, 0x83, 0x3a, + 0x11, 0x5a, 0x31, 0xd8, 0xb9, 0x7b, 0x8f, 0xbe, 0x6b, 0x8c, 0x2d, 0x49, 0xfc, 0x49, 0x85, 0xc8, + 0x1f, 0xd9, 0xc5, 0x0a, 0xd2, 0x0c, 0x02, 0xe9, 0x14, 0xd5, 0x67, 0xe2, 0x43, 0x26, 0x30, 0x14, + 0x3a, 0x5f, 0xaa, 0x69, 0x41, 0xc3, 0x1b, 0x56, 0xa4, 0x39, 0x71, 0x16, 0x61, 0x61, 0x4b, 0x3b, + 0x4f, 0x25, 0x5b, 0xe8, 0x68, 0x93, 0xaf, 0x6c, 0x78, 0x9d, 0x12, 0x5c, 0x46, 0x1b, 0xe0, 0xb7, + 
0xec, 0x64, 0x2d, 0xb5, 0x29, 0x75, 0x73, 0x62, 0x83, 0x88, 0x3d, 0x37, 0x28, 0xd4, 0x88, 0x3b, + 0x64, 0xad, 0x04, 0x8c, 0x5c, 0x49, 0x23, 0x07, 0xff, 0x72, 0xfb, 0xe5, 0x7b, 0xf4, 0x55, 0x2f, + 0xed, 0x53, 0x1f, 0x53, 0x76, 0x56, 0xf5, 0x91, 0x77, 0x26, 0x22, 0x15, 0x62, 0xd1, 0xcb, 0xe9, + 0x76, 0xb8, 0x13, 0xf9, 0x9a, 0xf5, 0x0a, 0x38, 0x42, 0x25, 0x8c, 0x4d, 0x73, 0xc7, 0x4d, 0xef, + 0xa8, 0x82, 0x5f, 0x6d, 0x0a, 0x3b, 0xe2, 0x8e, 0xe8, 0xaf, 0x31, 0x88, 0x2b, 0xdf, 0xcd, 0xad, + 0x78, 0x84, 0x6a, 0xe6, 0x66, 0xe4, 0xfd, 0x61, 0x2f, 0xa7, 0x33, 0x4e, 0x21, 0xda, 0xd3, 0xfe, + 0xb8, 0xbc, 0xf1, 0xce, 0x45, 0x7f, 0xa5, 0xa7, 0x48, 0x77, 0x8c, 0xff, 0x71, 0x86, 0x26, 0x55, + 0x75, 0xac, 0xf6, 0xca, 0x9f, 0xf5, 0xdf, 0xb8, 0x36, 0x98, 0x7d, 0x8c, 0x23, 0x9c, 0x04, 0x98, + 0x24, 0xa8, 0x26, 0xa9, 0xef, 0xff, 0xa7, 0x5f, 0x74, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x0d, + 0x06, 0x13, 0x61, 0x56, 0x02, 0x00, 0x00, +} diff --git a/vendor/storj.io/common/pb/streams.proto b/vendor/storj.io/common/pb/streams.proto new file mode 100644 index 000000000..d4fdd8f1a --- /dev/null +++ b/vendor/storj.io/common/pb/streams.proto @@ -0,0 +1,27 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package streams; + +message SegmentMeta { + bytes encrypted_key = 1; + bytes key_nonce = 2; +} + +message StreamInfo { + int64 deprecated_number_of_segments = 1; + int64 segments_size = 2; + int64 last_segment_size = 3; + bytes metadata = 4; +} + +message StreamMeta { + bytes encrypted_stream_info = 1; + int32 encryption_type = 2; + int32 encryption_block_size = 3; + SegmentMeta last_segment_meta = 4; + int64 number_of_segments = 5; +} diff --git a/vendor/storj.io/common/pb/types.go b/vendor/storj.io/common/pb/types.go new file mode 100644 index 000000000..970a0eddb --- /dev/null +++ b/vendor/storj.io/common/pb/types.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package pb + +import "storj.io/common/storj" + +// Path represents a object path +type Path = storj.Path + +// NodeID is an alias to storj.NodeID for use in generated protobuf code +type NodeID = storj.NodeID + +// NodeIDList is an alias to storj.NodeIDList for use in generated protobuf code +type NodeIDList = storj.NodeIDList + +// PieceID is an alias to storj.PieceID for use in generated protobuf code +type PieceID = storj.PieceID + +// PiecePublicKey is an alias to storj.PiecePublicKey for use in generated protobuf code +type PiecePublicKey = storj.PiecePublicKey + +// PiecePrivateKey is an alias to storj.PiecePrivateKey for use in generated protobuf code +type PiecePrivateKey = storj.PiecePrivateKey + +// SerialNumber is an alias to storj.SerialNumber for use in generated protobuf code +type SerialNumber = storj.SerialNumber + +// StreamID is an alias to storj.StreamID for use in generated protobuf code +type StreamID = storj.StreamID + +// Nonce is an alias to storj.Nonce for use in generated protobuf code +type Nonce = storj.Nonce + +// SegmentID is an alias to storj.SegmentID for use in generated protobuf code +type SegmentID = storj.SegmentID diff --git a/vendor/storj.io/common/pb/utils.go b/vendor/storj.io/common/pb/utils.go new file mode 100644 index 000000000..3ec02eabf --- /dev/null +++ b/vendor/storj.io/common/pb/utils.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package pb + +import ( + "bytes" + "reflect" + + "github.com/gogo/protobuf/proto" + + "storj.io/common/storj" +) + +// Equal compares two Protobuf messages via serialization +func Equal(msg1, msg2 proto.Message) bool { + //reflect.DeepEqual and proto.Equal don't seem work in all cases + //todo: see how slow this is compared to custom equality checks + if msg1 == nil { + return msg2 == nil + } + if reflect.TypeOf(msg1) != reflect.TypeOf(msg2) { + return false + } + msg1Bytes, err := Marshal(msg1) + if err != nil { + return false + } + msg2Bytes, err := Marshal(msg2) + if err != nil { + return false + } + return bytes.Equal(msg1Bytes, msg2Bytes) +} + +// NodesToIDs extracts Node-s into a list of ids +func NodesToIDs(nodes []*Node) storj.NodeIDList { + ids := make(storj.NodeIDList, len(nodes)) + for i, node := range nodes { + if node != nil { + ids[i] = node.Id + } + } + return ids +} + +// CopyNode returns a deep copy of a node +// It would be better to use `proto.Clone` but it is curently incompatible +// with gogo's customtype extension. +// (see https://github.com/gogo/protobuf/issues/147) +func CopyNode(src *Node) (dst *Node) { + node := Node{Id: storj.NodeID{}} + copy(node.Id[:], src.Id[:]) + + if src.Address != nil { + node.Address = &NodeAddress{ + Transport: src.Address.Transport, + Address: src.Address.Address, + } + } + + return &node +} + +// AddressEqual compares two node addresses +func AddressEqual(a1, a2 *NodeAddress) bool { + if a1 == nil && a2 == nil { + return true + } + if a1 == nil || a2 == nil { + return false + } + return a1.Transport == a2.Transport && + a1.Address == a2.Address +} + +// NewRedundancySchemeToStorj creates new storj.RedundancyScheme from the given +// protobuf RedundancyScheme. 
+func NewRedundancySchemeToStorj(scheme *RedundancyScheme) *storj.RedundancyScheme { + return &storj.RedundancyScheme{ + Algorithm: storj.RedundancyAlgorithm(scheme.GetType()), + ShareSize: scheme.GetErasureShareSize(), + RequiredShares: int16(scheme.GetMinReq()), + RepairShares: int16(scheme.GetRepairThreshold()), + OptimalShares: int16(scheme.GetSuccessThreshold()), + TotalShares: int16(scheme.GetTotal()), + } +} diff --git a/vendor/storj.io/common/pb/vouchers.pb.go b/vendor/storj.io/common/pb/vouchers.pb.go new file mode 100644 index 000000000..a7dbbd9e1 --- /dev/null +++ b/vendor/storj.io/common/pb/vouchers.pb.go @@ -0,0 +1,290 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: vouchers.proto + +package pb + +import ( + context "context" + fmt "fmt" + math "math" + time "time" + + proto "github.com/gogo/protobuf/proto" + + drpc "storj.io/drpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type VoucherResponse_Status int32 + +const ( + VoucherResponse_INVALID VoucherResponse_Status = 0 + VoucherResponse_ACCEPTED VoucherResponse_Status = 1 + VoucherResponse_REJECTED VoucherResponse_Status = 2 +) + +var VoucherResponse_Status_name = map[int32]string{ + 0: "INVALID", + 1: "ACCEPTED", + 2: "REJECTED", +} + +var VoucherResponse_Status_value = map[string]int32{ + "INVALID": 0, + "ACCEPTED": 1, + "REJECTED": 2, +} + +func (x VoucherResponse_Status) String() string { + return proto.EnumName(VoucherResponse_Status_name, int32(x)) +} + +func (VoucherResponse_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_3659b9a115b8060d, []int{2, 0} +} + +// Voucher is a signed message verifying that a node has been vetted by a particular satellite +type Voucher struct { + SatelliteId NodeID `protobuf:"bytes,1,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"` + StorageNodeId NodeID `protobuf:"bytes,2,opt,name=storage_node_id,json=storageNodeId,proto3,customtype=NodeID" json:"storage_node_id"` + Expiration time.Time `protobuf:"bytes,3,opt,name=expiration,proto3,stdtime" json:"expiration"` + SatelliteSignature []byte `protobuf:"bytes,4,opt,name=satellite_signature,json=satelliteSignature,proto3" json:"satellite_signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Voucher) Reset() { *m = Voucher{} } +func (m *Voucher) String() string { return proto.CompactTextString(m) } +func (*Voucher) ProtoMessage() {} +func (*Voucher) Descriptor() ([]byte, []int) { + return fileDescriptor_3659b9a115b8060d, []int{0} +} +func (m *Voucher) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Voucher.Unmarshal(m, b) +} +func (m *Voucher) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Voucher.Marshal(b, m, deterministic) +} 
+func (m *Voucher) XXX_Merge(src proto.Message) { + xxx_messageInfo_Voucher.Merge(m, src) +} +func (m *Voucher) XXX_Size() int { + return xxx_messageInfo_Voucher.Size(m) +} +func (m *Voucher) XXX_DiscardUnknown() { + xxx_messageInfo_Voucher.DiscardUnknown(m) +} + +var xxx_messageInfo_Voucher proto.InternalMessageInfo + +func (m *Voucher) GetExpiration() time.Time { + if m != nil { + return m.Expiration + } + return time.Time{} +} + +func (m *Voucher) GetSatelliteSignature() []byte { + if m != nil { + return m.SatelliteSignature + } + return nil +} + +type VoucherRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VoucherRequest) Reset() { *m = VoucherRequest{} } +func (m *VoucherRequest) String() string { return proto.CompactTextString(m) } +func (*VoucherRequest) ProtoMessage() {} +func (*VoucherRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_3659b9a115b8060d, []int{1} +} +func (m *VoucherRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VoucherRequest.Unmarshal(m, b) +} +func (m *VoucherRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VoucherRequest.Marshal(b, m, deterministic) +} +func (m *VoucherRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoucherRequest.Merge(m, src) +} +func (m *VoucherRequest) XXX_Size() int { + return xxx_messageInfo_VoucherRequest.Size(m) +} +func (m *VoucherRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VoucherRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VoucherRequest proto.InternalMessageInfo + +type VoucherResponse struct { + Voucher *Voucher `protobuf:"bytes,1,opt,name=voucher,proto3" json:"voucher,omitempty"` + Status VoucherResponse_Status `protobuf:"varint,2,opt,name=status,proto3,enum=vouchers.VoucherResponse_Status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *VoucherResponse) Reset() { *m = VoucherResponse{} } +func (m *VoucherResponse) String() string { return proto.CompactTextString(m) } +func (*VoucherResponse) ProtoMessage() {} +func (*VoucherResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_3659b9a115b8060d, []int{2} +} +func (m *VoucherResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VoucherResponse.Unmarshal(m, b) +} +func (m *VoucherResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VoucherResponse.Marshal(b, m, deterministic) +} +func (m *VoucherResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoucherResponse.Merge(m, src) +} +func (m *VoucherResponse) XXX_Size() int { + return xxx_messageInfo_VoucherResponse.Size(m) +} +func (m *VoucherResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VoucherResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VoucherResponse proto.InternalMessageInfo + +func (m *VoucherResponse) GetVoucher() *Voucher { + if m != nil { + return m.Voucher + } + return nil +} + +func (m *VoucherResponse) GetStatus() VoucherResponse_Status { + if m != nil { + return m.Status + } + return VoucherResponse_INVALID +} + +func init() { + proto.RegisterEnum("vouchers.VoucherResponse_Status", VoucherResponse_Status_name, VoucherResponse_Status_value) + proto.RegisterType((*Voucher)(nil), "vouchers.Voucher") + proto.RegisterType((*VoucherRequest)(nil), "vouchers.VoucherRequest") + proto.RegisterType((*VoucherResponse)(nil), "vouchers.VoucherResponse") +} + +func init() { proto.RegisterFile("vouchers.proto", fileDescriptor_3659b9a115b8060d) } + +var fileDescriptor_3659b9a115b8060d = []byte{ + // 382 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4f, 0x6e, 0x9b, 0x40, + 0x14, 0xc6, 0x8d, 0x5b, 0x01, 0x7a, 0xb8, 0xd8, 0x9d, 0x76, 0x41, 0xd9, 0x60, 0xb1, 0xb2, 0x54, + 0x09, 0x64, 0x2a, 0x55, 0x5d, 0xd6, 0x36, 0x2c, 0xa8, 0x2c, 
0xab, 0xc2, 0x96, 0x17, 0xdd, 0x58, + 0xd8, 0x4c, 0x09, 0x91, 0x61, 0x08, 0x33, 0x44, 0x39, 0x46, 0xee, 0x91, 0x8b, 0xe4, 0x0c, 0x59, + 0x38, 0x07, 0xc8, 0x25, 0x22, 0x86, 0x3f, 0x89, 0x14, 0x67, 0xf9, 0xbd, 0xf7, 0x7b, 0x6f, 0xbe, + 0xf7, 0x0d, 0xa8, 0xd7, 0xa4, 0x3c, 0x5c, 0xe0, 0x82, 0x5a, 0x79, 0x41, 0x18, 0x41, 0x72, 0xab, + 0x75, 0x88, 0x49, 0x4c, 0xea, 0xaa, 0x6e, 0xc4, 0x84, 0xc4, 0x47, 0x6c, 0x73, 0xb5, 0x2f, 0xff, + 0xdb, 0x2c, 0x49, 0x31, 0x65, 0x61, 0x9a, 0xd7, 0x80, 0xf9, 0x24, 0x80, 0xb4, 0xad, 0x27, 0xd1, + 0x14, 0x06, 0x34, 0x64, 0xf8, 0x78, 0x4c, 0x18, 0xde, 0x25, 0x91, 0x26, 0x8c, 0x85, 0xc9, 0x60, + 0xae, 0xde, 0x9f, 0x8c, 0xde, 0xc3, 0xc9, 0x10, 0x57, 0x24, 0xc2, 0xbe, 0x1b, 0x28, 0x1d, 0xe3, + 0x47, 0xe8, 0x27, 0x0c, 0x29, 0x23, 0x45, 0x18, 0xe3, 0x5d, 0x46, 0x22, 0x3e, 0xd5, 0x3f, 0x3b, + 0xf5, 0xa9, 0xc1, 0xb8, 0x8c, 0x90, 0x0b, 0x80, 0x6f, 0xf2, 0xa4, 0x08, 0x59, 0x42, 0x32, 0xed, + 0xc3, 0x58, 0x98, 0x28, 0x8e, 0x6e, 0xd5, 0x66, 0xad, 0xd6, 0xac, 0xb5, 0x69, 0xcd, 0xce, 0xe5, + 0x6a, 0xdd, 0xed, 0xa3, 0x21, 0x04, 0xaf, 0xe6, 0x90, 0x0d, 0x5f, 0x5e, 0x0c, 0xd3, 0x24, 0xce, + 0x42, 0x56, 0x16, 0x58, 0xfb, 0x58, 0x39, 0x08, 0x50, 0xd7, 0x5a, 0xb7, 0x1d, 0x73, 0x04, 0x6a, + 0x73, 0x6c, 0x80, 0xaf, 0x4a, 0x4c, 0x99, 0x79, 0x27, 0xc0, 0xb0, 0x2b, 0xd1, 0x9c, 0x64, 0x14, + 0xa3, 0xef, 0x20, 0x35, 0x61, 0xf2, 0x08, 0x14, 0xe7, 0xb3, 0xd5, 0x85, 0xdd, 0xb2, 0x2d, 0x81, + 0x7e, 0x81, 0x48, 0x59, 0xc8, 0x4a, 0xca, 0x0f, 0x57, 0x9d, 0xf1, 0x5b, 0xb6, 0xd9, 0x6b, 0xad, + 0x39, 0x17, 0x34, 0xbc, 0x39, 0x05, 0xb1, 0xae, 0x20, 0x05, 0x24, 0x7f, 0xb5, 0x9d, 0x2d, 0x7d, + 0x77, 0xd4, 0x43, 0x03, 0x90, 0x67, 0x8b, 0x85, 0xf7, 0x77, 0xe3, 0xb9, 0x23, 0xa1, 0x52, 0x81, + 0xf7, 0xc7, 0x5b, 0x54, 0xaa, 0xef, 0x2c, 0x41, 0x6e, 0x96, 0x52, 0xf4, 0x1b, 0xa4, 0xe6, 0x08, + 0xa4, 0x9d, 0x79, 0x93, 0x77, 0xf4, 0x6f, 0xef, 0xba, 0x31, 0x7b, 0xf3, 0xaf, 0xff, 0x50, 0xf5, + 0x2b, 0x97, 0x56, 0x42, 0xec, 0x03, 0x49, 0x53, 0x92, 0xd9, 0xf9, 0x7e, 0x2f, 0xf2, 0xf8, 0x7f, + 
0x3c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x89, 0xab, 0x2f, 0x68, 0x61, 0x02, 0x00, 0x00, +} + +// --- DRPC BEGIN --- + +type DRPCVouchersClient interface { + DRPCConn() drpc.Conn + + Request(ctx context.Context, in *VoucherRequest) (*VoucherResponse, error) +} + +type drpcVouchersClient struct { + cc drpc.Conn +} + +func NewDRPCVouchersClient(cc drpc.Conn) DRPCVouchersClient { + return &drpcVouchersClient{cc} +} + +func (c *drpcVouchersClient) DRPCConn() drpc.Conn { return c.cc } + +func (c *drpcVouchersClient) Request(ctx context.Context, in *VoucherRequest) (*VoucherResponse, error) { + out := new(VoucherResponse) + err := c.cc.Invoke(ctx, "/vouchers.Vouchers/Request", in, out) + if err != nil { + return nil, err + } + return out, nil +} + +type DRPCVouchersServer interface { + Request(context.Context, *VoucherRequest) (*VoucherResponse, error) +} + +type DRPCVouchersDescription struct{} + +func (DRPCVouchersDescription) NumMethods() int { return 1 } + +func (DRPCVouchersDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) { + switch n { + case 0: + return "/vouchers.Vouchers/Request", + func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) { + return srv.(DRPCVouchersServer). 
+ Request( + ctx, + in1.(*VoucherRequest), + ) + }, DRPCVouchersServer.Request, true + default: + return "", nil, nil, false + } +} + +func DRPCRegisterVouchers(mux drpc.Mux, impl DRPCVouchersServer) error { + return mux.Register(impl, DRPCVouchersDescription{}) +} + +type DRPCVouchers_RequestStream interface { + drpc.Stream + SendAndClose(*VoucherResponse) error +} + +type drpcVouchersRequestStream struct { + drpc.Stream +} + +func (x *drpcVouchersRequestStream) SendAndClose(m *VoucherResponse) error { + if err := x.MsgSend(m); err != nil { + return err + } + return x.CloseSend() +} + +// --- DRPC END --- diff --git a/vendor/storj.io/common/pb/vouchers.proto b/vendor/storj.io/common/pb/vouchers.proto new file mode 100644 index 000000000..817ee8abd --- /dev/null +++ b/vendor/storj.io/common/pb/vouchers.proto @@ -0,0 +1,39 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "storj.io/common/pb"; + +package vouchers; + +import "gogo.proto"; +import "google/protobuf/timestamp.proto"; + +// Voucher is a signed message verifying that a node has been vetted by a particular satellite +message Voucher { + bytes satellite_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + bytes storage_node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false]; + + google.protobuf.Timestamp expiration = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + + bytes satellite_signature = 4; +} + +message VoucherRequest {} + +message VoucherResponse { + enum Status { + INVALID = 0; + ACCEPTED = 1; + REJECTED = 2; + } + + Voucher voucher = 1; + + Status status = 2; +} + + +service Vouchers { + rpc Request(VoucherRequest) returns (VoucherResponse) {} +} diff --git a/vendor/storj.io/common/peertls/doc.go b/vendor/storj.io/common/peertls/doc.go new file mode 100644 index 000000000..1af75b5eb --- /dev/null +++ b/vendor/storj.io/common/peertls/doc.go @@ -0,0 +1,5 @@ +// 
Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package peertls manages TLS configuration for peers. +package peertls diff --git a/vendor/storj.io/common/peertls/extensions/doc.go b/vendor/storj.io/common/peertls/extensions/doc.go new file mode 100644 index 000000000..e3f6f1877 --- /dev/null +++ b/vendor/storj.io/common/peertls/extensions/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package extensions contains extensions to TLS certificate handling. +package extensions diff --git a/vendor/storj.io/common/peertls/extensions/extensions.go b/vendor/storj.io/common/peertls/extensions/extensions.go new file mode 100644 index 000000000..c561cc4ad --- /dev/null +++ b/vendor/storj.io/common/peertls/extensions/extensions.go @@ -0,0 +1,187 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package extensions + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + + "github.com/zeebo/errs" + + "storj.io/common/peertls" + "storj.io/common/pkcrypto" +) + +const ( + // RevocationBucket is the bolt bucket to store revocation data in + RevocationBucket = "revocations" +) + +var ( + // DefaultHandlers is a slice of handlers that we use by default. + // - IDVersionHandler + DefaultHandlers HandlerFactories + + // CAWhitelistSignedLeafHandler verifies that the leaf cert of the remote peer's + // identity was signed by one of the CA certs in the whitelist. + CAWhitelistSignedLeafHandler = NewHandlerFactory( + &SignedCertExtID, caWhitelistSignedLeafHandler, + ) + + // NB: 2.999.X is reserved for "example" OIDs + // (see http://oid-info.com/get/2.999) + // 2.999.1.X -- storj general/misc. extensions + // 2.999.2.X -- storj identity extensions + + // SignedCertExtID is the asn1 object ID for a pkix extension holding a + // signature of the cert it's extending, signed by some CA (e.g. the root cert chain). 
+ // This extensionHandler allows for an additional signature per certificate. + SignedCertExtID = ExtensionID{2, 999, 1, 1} + // RevocationExtID is the asn1 object ID for a pkix extension containing the + // most recent certificate revocation data + // for the current TLS cert chain. + RevocationExtID = ExtensionID{2, 999, 1, 2} + // IdentityVersionExtID is the asn1 object ID for a pkix extension that + // specifies the identity version of the certificate chain. + IdentityVersionExtID = ExtensionID{2, 999, 2, 1} + // IdentityPOWCounterExtID is the asn1 object ID for a pkix extension that + // specifies how many times to hash the CA public key to calculate the node ID. + IdentityPOWCounterExtID = ExtensionID{2, 999, 2, 2} + + // Error is used when an error occurs while processing an extension. + Error = errs.Class("extension error") + + // ErrVerifyCASignedLeaf is used when a signed leaf extension signature wasn't produced + // by any CA in the whitelist. + ErrVerifyCASignedLeaf = Error.New("leaf not signed by any CA in the whitelist") + // ErrUniqueExtensions is used when multiple extensions have the same Id + ErrUniqueExtensions = Error.New("extensions are not unique") +) + +// ExtensionID is an alias to an `asn1.ObjectIdentifier`. +type ExtensionID = asn1.ObjectIdentifier + +// Config is used to bind cli flags for determining which extensions will +// be used by the server +type Config struct { + Revocation bool `default:"true" help:"if true, client leaves may contain the most recent certificate revocation for the current certificate"` + WhitelistSignedLeaf bool `default:"false" help:"if true, client leaves must contain a valid \"signed certificate extension\" (NB: verified against certs in the peer ca whitelist; i.e. if true, a whitelist must be provided)"` +} + +// Options holds common options for use in handling extensions. 
+type Options struct { + PeerCAWhitelist []*x509.Certificate + RevocationDB RevocationDB + PeerIDVersions string +} + +// HandlerFactories is a collection of `HandlerFactory`s for convenience. +// Defines `Register` and `WithOptions` methods. +type HandlerFactories []*HandlerFactory + +// HandlerFactory holds a factory for a handler function given the passed `Options`. +// For use in handling extensions with the corresponding ExtensionID. +type HandlerFactory struct { + id *ExtensionID + factory HandlerFactoryFunc +} + +// HandlerFactoryFunc is a factory function used to build `HandlerFunc`s given +// the passed options. +type HandlerFactoryFunc func(options *Options) HandlerFunc + +// HandlerFunc takes an extension and the remote peer's certificate chains for +// use in extension handling. +type HandlerFunc func(pkix.Extension, [][]*x509.Certificate) error + +// HandlerFuncMap maps an `ExtensionID` pointer to a `HandlerFunc`. +// Because an `ExtensionID` is a pointer , one can use a new pointer to the same +// asn1 object ID constant to store multiple `HandlerFunc`s for the same +// underlying extension id value. +type HandlerFuncMap map[*ExtensionID]HandlerFunc + +// NewHandlerFactory builds a `HandlerFactory` pointer from an `ExtensionID` and a `HandlerFactoryFunc`. +func NewHandlerFactory(id *ExtensionID, handlerFactory HandlerFactoryFunc) *HandlerFactory { + return &HandlerFactory{ + id: id, + factory: handlerFactory, + } +} + +// AddExtraExtension adds one or more extensions to a certificate for serialization. +// NB: this *does not* serialize or persist the extension into the certificates's +// raw bytes. To add a persistent extension use `FullCertificateAuthority.AddExtension` +// or `ManageableIdentity.AddExtension`. 
+func AddExtraExtension(cert *x509.Certificate, exts ...pkix.Extension) (err error) { + if len(exts) == 0 { + return nil + } + if !uniqueExts(append(cert.ExtraExtensions, exts...)) { + return ErrUniqueExtensions + } + + for _, ext := range exts { + e := pkix.Extension{Id: ext.Id, Value: make([]byte, len(ext.Value))} + copy(e.Value, ext.Value) + cert.ExtraExtensions = append(cert.ExtraExtensions, e) + } + return nil +} + +// Register adds an extension handler factory to the list. +func (factories *HandlerFactories) Register(newHandlers ...*HandlerFactory) { + *factories = append(*factories, newHandlers...) +} + +// WithOptions builds a `HandlerFuncMap` by calling each `HandlerFactory` with +// the passed `Options` pointer and using the respective `ExtensionID` pointer +// as the key. +func (factories HandlerFactories) WithOptions(opts *Options) HandlerFuncMap { + handlerFuncMap := make(HandlerFuncMap) + for _, factory := range factories { + handlerFuncMap[factory.ID()] = factory.NewHandlerFunc(opts) + } + return handlerFuncMap +} + +// ID returns the `ExtensionID` pointer stored with this factory. This factory +// will only handle extensions that have a matching id value. +func (handlerFactory *HandlerFactory) ID() *ExtensionID { + return handlerFactory.id +} + +// NewHandlerFunc returns a new `HandlerFunc` with the passed `Options`. 
+func (handlerFactory *HandlerFactory) NewHandlerFunc(opts *Options) HandlerFunc { + return handlerFactory.factory(opts) +} + +func uniqueExts(exts []pkix.Extension) bool { + seen := make(map[string]struct{}, len(exts)) + for _, e := range exts { + s := e.Id.String() + if _, ok := seen[s]; ok { + return false + } + seen[s] = struct{}{} + } + return true +} + +func caWhitelistSignedLeafHandler(opts *Options) HandlerFunc { + return func(ext pkix.Extension, chains [][]*x509.Certificate) error { + if opts.PeerCAWhitelist == nil { + return Error.New("no whitelist provided") + } + + leaf := chains[0][peertls.LeafIndex] + for _, ca := range opts.PeerCAWhitelist { + err := pkcrypto.HashAndVerifySignature(ca.PublicKey, leaf.RawTBSCertificate, ext.Value) + if err == nil { + return nil + } + } + return ErrVerifyCASignedLeaf + } +} diff --git a/vendor/storj.io/common/peertls/extensions/fuzz.go b/vendor/storj.io/common/peertls/extensions/fuzz.go new file mode 100644 index 000000000..aefa84108 --- /dev/null +++ b/vendor/storj.io/common/peertls/extensions/fuzz.go @@ -0,0 +1,29 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build gofuzz + +package extensions + +// To run fuzzing tests: +// +// clone github.com/storj/fuzz-corpus +// +// Install fuzzing tools: +// GO111MODULE=off go get github.com/dvyukov/go-fuzz/... +// +// Build binaries: +// go-fuzz-build . +// +// Run with test corpus: +// go-fuzz -bin extensions-fuzz.zip -workdir $FUZZCORPUS/peertls/extensions + +// Fuzz implements a simple fuzz test for revocationDecoder. 
+func Fuzz(data []byte) int { + var dec revocationDecoder + _, err := dec.decode(data) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/storj.io/common/peertls/extensions/gob.go b/vendor/storj.io/common/peertls/extensions/gob.go new file mode 100644 index 000000000..2c99744d1 --- /dev/null +++ b/vendor/storj.io/common/peertls/extensions/gob.go @@ -0,0 +1,231 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package extensions + +import ( + "bytes" + "encoding/binary" + "io" + "math/bits" + + "github.com/zeebo/errs" +) + +const ( + uint64Size = 8 + firstCustomTypeID = 65 + encFirstCustomTypeID = 130 // encoded 65 +) + +// hardcoded initial part of Revocation gob encoding, its constant until Revocation struct won't change, +// contains definition of Revocation struct with fields names and types. +// https://golang.org/pkg/encoding/gob/ +var wireEncoding = []byte{ + 64, 255, 129, 3, 1, 1, 10, 82, 101, 118, 111, 99, 97, 116, 105, 111, 110, 1, 255, 130, 0, + 1, 3, 1, 9, 84, 105, 109, 101, 115, 116, 97, 109, 112, 1, 4, 0, 1, 7, 75, 101, 121, 72, + 97, 115, 104, 1, 10, 0, 1, 9, 83, 105, 103, 110, 97, 116, 117, 114, 101, 1, 10, 0, 0, 0, +} + +type revocationEncoder struct { + value *bytes.Buffer +} + +func (encoder *revocationEncoder) encode(revocation Revocation) ([]byte, error) { + encoder.value = new(bytes.Buffer) + + encoder.encodeInt(firstCustomTypeID) + delta := uint64(1) + if revocation.Timestamp != 0 { + encoder.encodeUint(delta) + encoder.encodeInt(revocation.Timestamp) + } else { + delta++ + } + + if len(revocation.KeyHash) > 0 { + encoder.encodeUint(delta) + encoder.encodeUint(uint64(len(revocation.KeyHash))) + encoder.writeBytes(revocation.KeyHash) + delta = uint64(1) + } else { + delta++ + } + + if len(revocation.Signature) > 0 { + encoder.encodeUint(delta) + encoder.encodeUint(uint64(len(revocation.Signature))) + encoder.writeBytes(revocation.Signature) + } + + encoder.encodeUint(0) + + valueLength := 
encoder.value.Len() + + encoder.encodeUint(uint64(valueLength)) + + value := encoder.value.Bytes() + lengthData := value[valueLength:] + valueData := value[:valueLength] + return append(wireEncoding, append(lengthData, valueData...)...), nil +} + +func (encoder *revocationEncoder) encodeInt(i int64) { + var x uint64 + if i < 0 { + x = uint64(^i<<1) | 1 + } else { + x = uint64(i << 1) + } + encoder.encodeUint(x) +} + +func (encoder *revocationEncoder) encodeUint(x uint64) { + if x <= 0x7F { + encoder.writeByte(uint8(x)) + return + } + + var stateBuf [1 + uint64Size]byte + binary.BigEndian.PutUint64(stateBuf[1:], x) + bc := bits.LeadingZeros64(x) >> 3 // 8 - bytelen(x) + stateBuf[bc] = uint8(bc - uint64Size) // and then we subtract 8 to get -bytelen(x) + + encoder.writeBytes(stateBuf[bc : uint64Size+1]) +} + +func (encoder *revocationEncoder) writeByte(x byte) { + encoder.value.WriteByte(x) +} + +func (encoder *revocationEncoder) writeBytes(x []byte) { + encoder.value.Write(x) +} + +type revocationDecoder struct { + data *bytes.Buffer +} + +func (decoder *revocationDecoder) decode(data []byte) (revocation Revocation, err error) { + decoder.data = bytes.NewBuffer(data) + + wire := make([]byte, len(wireEncoding)) + _, err = io.ReadFull(decoder.data, wire) + if err != nil { + return revocation, err + } + if !bytes.Equal(wire, wireEncoding) { + return revocation, ErrRevocation.New("invalid revocation encoding") + } + + length, err := decoder.decodeUint() + if err != nil { + return revocation, err + } + + if length != uint64(len(decoder.data.Bytes())) { + return revocation, ErrRevocation.New("invalid revocation encoding") + } + + typeID, err := decoder.decodeUint() + if err != nil { + return revocation, err + } + if typeID != encFirstCustomTypeID { + return revocation, ErrRevocation.New("invalid revocation encoding") + } + + index := uint64(0) + for { + field, err := decoder.decodeUint() + if err != nil { + return revocation, err + } + + if field == 0 { + break + } + + 
switch field + index { + case 1: + revocation.Timestamp, err = decoder.decodeInt() + if err != nil { + return revocation, err + } + case 2: + revocation.KeyHash, err = decoder.decodeByteArray() + if err != nil { + return revocation, err + } + case 3: + revocation.Signature, err = decoder.decodeByteArray() + if err != nil { + return revocation, err + } + default: + return revocation, errs.New("invalid field") + } + + index += field + } + + return revocation, nil +} + +func (decoder *revocationDecoder) decodeUint() (x uint64, err error) { + b, err := decoder.data.ReadByte() + if err != nil { + return 0, err + } + if b <= 0x7f { + return uint64(b), nil + } + n := -int(int8(b)) + if n > uint64Size { + return 0, errs.New("encoded unsigned integer out of range") + } + buf := make([]byte, n) + read, err := io.ReadFull(decoder.data, buf) + if err != nil { + return 0, err + } + if read < n { + return 0, errs.New("invalid uint data length %d: exceeds input size %d", n, len(buf)) + } + // Don't need to check error; it's safe to loop regardless. + // Could check that the high byte is zero but it's not worth it. 
+ for _, b := range buf { + x = x<<8 | uint64(b) + } + return x, nil +} + +func (decoder *revocationDecoder) decodeInt() (int64, error) { + x, err := decoder.decodeUint() + if err != nil { + return 0, err + } + if x&1 != 0 { + return ^int64(x >> 1), nil + } + return int64(x >> 1), nil +} + +func (decoder *revocationDecoder) decodeByteArray() ([]byte, error) { + length, err := decoder.decodeUint() + if err != nil { + return nil, err + } + + n := int(length) + if uint64(n) != length || decoder.data.Len() < n { + return nil, errs.New("invalid array length: %d", length) + } + + buf := make([]byte, n) + _, err = io.ReadFull(decoder.data, buf) + if err != nil { + return nil, err + } + return buf, nil +} diff --git a/vendor/storj.io/common/peertls/extensions/revocations.go b/vendor/storj.io/common/peertls/extensions/revocations.go new file mode 100644 index 000000000..92019cda2 --- /dev/null +++ b/vendor/storj.io/common/peertls/extensions/revocations.go @@ -0,0 +1,177 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package extensions + +import ( + "bytes" + "context" + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "encoding/binary" + "time" + + "github.com/zeebo/errs" + + "storj.io/common/peertls" + "storj.io/common/pkcrypto" +) + +var ( + // RevocationCheckHandler ensures that a remote peer's certificate chain + // doesn't contain any revoked certificates. + RevocationCheckHandler = NewHandlerFactory(&RevocationExtID, revocationChecker) + // RevocationUpdateHandler looks for certificate revocation extensions on a + // remote peer's certificate chain, adding them to the revocation DB if valid. 
+ RevocationUpdateHandler = NewHandlerFactory(&RevocationExtID, revocationUpdater) +) + +// ErrRevocation is used when an error occurs involving a certificate revocation +var ErrRevocation = errs.Class("revocation processing error") + +// ErrRevocationDB is used when an error occurs involving the revocations database +var ErrRevocationDB = errs.Class("revocation database error") + +// ErrRevokedCert is used when a certificate in the chain is revoked and not expected to be +var ErrRevokedCert = ErrRevocation.New("a certificate in the chain is revoked") + +// ErrRevocationTimestamp is used when a revocation's timestamp is older than the last recorded revocation +var ErrRevocationTimestamp = Error.New("revocation timestamp is older than last known revocation") + +// Revocation represents a certificate revocation for storage in the revocation +// database and for use in a TLS extension. +type Revocation struct { + Timestamp int64 + KeyHash []byte + Signature []byte +} + +// RevocationDB stores certificate revocation data. +type RevocationDB interface { + Get(ctx context.Context, chain []*x509.Certificate) (*Revocation, error) + Put(ctx context.Context, chain []*x509.Certificate, ext pkix.Extension) error + List(ctx context.Context) ([]*Revocation, error) +} + +// NewRevocationExt generates a revocation extension for a certificate. 
+func NewRevocationExt(key crypto.PrivateKey, revokedCert *x509.Certificate) (pkix.Extension, error) { + nowUnix := time.Now().Unix() + + keyHash, err := peertls.DoubleSHA256PublicKey(revokedCert.PublicKey) + if err != nil { + return pkix.Extension{}, err + } + rev := Revocation{ + Timestamp: nowUnix, + KeyHash: keyHash[:], + } + + if err := rev.Sign(key); err != nil { + return pkix.Extension{}, err + } + + revBytes, err := rev.Marshal() + if err != nil { + return pkix.Extension{}, err + } + + ext := pkix.Extension{ + Id: RevocationExtID, + Value: revBytes, + } + + return ext, nil +} + +func revocationChecker(opts *Options) HandlerFunc { + return func(_ pkix.Extension, chains [][]*x509.Certificate) error { + ca, leaf := chains[0][peertls.CAIndex], chains[0][peertls.LeafIndex] + lastRev, lastRevErr := opts.RevocationDB.Get(context.TODO(), chains[0]) + if lastRevErr != nil { + return Error.Wrap(lastRevErr) + } + if lastRev == nil { + return nil + } + + nodeID, err := peertls.DoubleSHA256PublicKey(ca.PublicKey) + if err != nil { + return err + } + leafKeyHash, err := peertls.DoubleSHA256PublicKey(leaf.PublicKey) + if err != nil { + return err + } + + // NB: we trust that anything that made it into the revocation DB is valid + // (i.e. no need for further verification) + switch { + case bytes.Equal(lastRev.KeyHash, nodeID[:]): + fallthrough + case bytes.Equal(lastRev.KeyHash, leafKeyHash[:]): + return ErrRevokedCert + default: + return nil + } + } +} + +func revocationUpdater(opts *Options) HandlerFunc { + return func(ext pkix.Extension, chains [][]*x509.Certificate) error { + if err := opts.RevocationDB.Put(context.TODO(), chains[0], ext); err != nil { + return err + } + return nil + } +} + +// Verify checks if the signature of the revocation was produced by the passed cert's public key. 
+func (r Revocation) Verify(signingCert *x509.Certificate) error { + pubKey, ok := signingCert.PublicKey.(crypto.PublicKey) + if !ok { + return pkcrypto.ErrUnsupportedKey.New("%T", signingCert.PublicKey) + } + + data := r.TBSBytes() + if err := pkcrypto.HashAndVerifySignature(pubKey, data, r.Signature); err != nil { + return err + } + return nil +} + +// TBSBytes (ToBeSigned) returns the hash of the revoked certificate key hash +// and the timestamp (i.e. hash(hash(cert bytes) + timestamp)). +func (r *Revocation) TBSBytes() []byte { + var tsBytes [binary.MaxVarintLen64]byte + binary.PutVarint(tsBytes[:], r.Timestamp) + toHash := append(append([]byte{}, r.KeyHash...), tsBytes[:]...) + + return pkcrypto.SHA256Hash(toHash) +} + +// Sign generates a signature using the passed key and attaches it to the revocation. +func (r *Revocation) Sign(key crypto.PrivateKey) error { + data := r.TBSBytes() + sig, err := pkcrypto.HashAndSign(key, data) + if err != nil { + return err + } + r.Signature = sig + return nil +} + +// Marshal serializes a revocation to bytes +func (r Revocation) Marshal() ([]byte, error) { + return (&revocationEncoder{}).encode(r) +} + +// Unmarshal deserializes a revocation from bytes +func (r *Revocation) Unmarshal(data []byte) error { + revocation, err := (&revocationDecoder{}).decode(data) + if err != nil { + return err + } + *r = revocation + return nil +} diff --git a/vendor/storj.io/common/peertls/peertls.go b/vendor/storj.io/common/peertls/peertls.go new file mode 100644 index 000000000..db8cd9539 --- /dev/null +++ b/vendor/storj.io/common/peertls/peertls.go @@ -0,0 +1,169 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package peertls + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "io" + + "github.com/zeebo/errs" + + "storj.io/common/pkcrypto" +) + +const ( + // LeafIndex is the index of the leaf certificate in a cert chain (0) + LeafIndex = iota + // CAIndex is the index of the CA certificate in a cert chain (1) + CAIndex +) + +var ( + // ErrNotExist is used when a file or directory doesn't exist. + ErrNotExist = errs.Class("file or directory not found error") + // ErrGenerate is used when an error occurred during cert/key generation. + ErrGenerate = errs.Class("tls generation error") + // ErrTLSTemplate is used when an error occurs during tls template generation. + ErrTLSTemplate = errs.Class("tls template error") + // ErrVerifyPeerCert is used when an error occurs during `VerifyPeerCertificate`. + ErrVerifyPeerCert = errs.Class("tls peer certificate verification error") + // ErrVerifyCertificateChain is used when a certificate chain can't be verified from leaf to root + // (i.e.: each cert in the chain should be signed by the preceding cert and the root should be self-signed). + ErrVerifyCertificateChain = errs.Class("certificate chain signature verification failed") + // ErrVerifyCAWhitelist is used when a signature wasn't produced by any CA in the whitelist. + ErrVerifyCAWhitelist = errs.Class("not signed by any CA in the whitelist") +) + +// PeerCertVerificationFunc is the signature for a `*tls.Config{}`'s +// `VerifyPeerCertificate` function. +type PeerCertVerificationFunc func([][]byte, [][]*x509.Certificate) error + +// VerifyPeerFunc combines multiple `*tls.Config#VerifyPeerCertificate` +// functions and adds certificate parsing. 
+func VerifyPeerFunc(next ...PeerCertVerificationFunc) PeerCertVerificationFunc { + return func(chain [][]byte, _ [][]*x509.Certificate) error { + c, err := pkcrypto.CertsFromDER(chain) + if err != nil { + return NewNonTemporaryError(ErrVerifyPeerCert.Wrap(err)) + } + + for _, n := range next { + if n != nil { + if err := n(chain, [][]*x509.Certificate{c}); err != nil { + return NewNonTemporaryError(ErrVerifyPeerCert.Wrap(err)) + } + } + } + return nil + } +} + +// VerifyPeerCertChains verifies that the first certificate chain contains certificates +// which are signed by their respective parents, ending with a self-signed root. +func VerifyPeerCertChains(_ [][]byte, parsedChains [][]*x509.Certificate) error { + return verifyChainSignatures(parsedChains[0]) +} + +// VerifyCAWhitelist verifies that the peer identity's CA was signed by any one +// of the (certificate authority) certificates in the provided whitelist. +func VerifyCAWhitelist(cas []*x509.Certificate) PeerCertVerificationFunc { + if cas == nil { + return nil + } + return func(_ [][]byte, parsedChains [][]*x509.Certificate) error { + for _, ca := range cas { + err := verifyCertSignature(ca, parsedChains[0][CAIndex]) + if err == nil { + return nil + } + } + return ErrVerifyCAWhitelist.New("CA cert") + } +} + +// TLSCert creates a tls.Certificate from chains, key and leaf. +func TLSCert(chain [][]byte, leaf *x509.Certificate, key crypto.PrivateKey) (*tls.Certificate, error) { + var err error + if leaf == nil { + leaf, err = pkcrypto.CertFromDER(chain[LeafIndex]) + if err != nil { + return nil, err + } + } + + return &tls.Certificate{ + Leaf: leaf, + Certificate: chain, + PrivateKey: key, + }, nil +} + +// WriteChain writes the certificate chain (leaf-first) and extensions to the writer, PEM-encoded. 
+func WriteChain(w io.Writer, chain ...*x509.Certificate) error { + if len(chain) < 1 { + return errs.New("expected at least one certificate for writing") + } + + var extErrs errs.Group + for _, c := range chain { + if err := pkcrypto.WriteCertPEM(w, c); err != nil { + return errs.Wrap(err) + } + } + return extErrs.Err() +} + +// ChainBytes returns bytes of the certificate chain (leaf-first) to the writer, PEM-encoded. +func ChainBytes(chain ...*x509.Certificate) ([]byte, error) { + var data bytes.Buffer + err := WriteChain(&data, chain...) + return data.Bytes(), err +} + +// CreateSelfSignedCertificate creates a new self-signed X.509v3 certificate +// using fields from the given template. +// +// A part of the errors that CreateCertificate can return it can return +// pkcrypto.ErrUnsuportedKey error. +func CreateSelfSignedCertificate(key crypto.PrivateKey, template *x509.Certificate) (*x509.Certificate, error) { + pubKey, err := pkcrypto.PublicKeyFromPrivate(key) + if err != nil { + return nil, err + } + return CreateCertificate(pubKey, key, template, template) +} + +// CreateCertificate creates a new X.509v3 certificate based on a template. +// The new certificate: +// +// * will have the public key given as 'signee' +// * will be signed by 'signer' (which should be the private key of 'issuer') +// * will be issued by 'issuer' +// * will have metadata fields copied from 'template' +// +// Returns the new Certificate object. +func CreateCertificate(signee crypto.PublicKey, signer crypto.PrivateKey, template, issuer *x509.Certificate) (*x509.Certificate, error) { + if _, ok := signer.(crypto.Signer); !ok { + // x509.CreateCertificate will panic in this case, so check here and make debugging easier + return nil, errs.New("can't sign certificate with signer key of type %T", signer) + } + + // TODO: should we check for uniqueness? + template.ExtraExtensions = append(template.ExtraExtensions, template.Extensions...) 
+ cb, err := x509.CreateCertificate( + rand.Reader, + template, + issuer, + signee, + signer, + ) + if err != nil { + return nil, errs.Wrap(err) + } + return pkcrypto.CertFromDER(cb) +} diff --git a/vendor/storj.io/common/peertls/templates.go b/vendor/storj.io/common/peertls/templates.go new file mode 100644 index 000000000..26992c27f --- /dev/null +++ b/vendor/storj.io/common/peertls/templates.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package peertls + +import ( + "crypto/x509" + "crypto/x509/pkix" +) + +// CATemplate returns x509.Certificate template for certificate authority +func CATemplate() (*x509.Certificate, error) { + serialNumber, err := newSerialNumber() + if err != nil { + return nil, ErrTLSTemplate.Wrap(err) + } + + template := &x509.Certificate{ + SerialNumber: serialNumber, + KeyUsage: x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + Subject: pkix.Name{Organization: []string{"Storj"}}, + } + + return template, nil +} + +// LeafTemplate returns x509.Certificate template for signing and encrypting +func LeafTemplate() (*x509.Certificate, error) { + serialNumber, err := newSerialNumber() + if err != nil { + return nil, ErrTLSTemplate.Wrap(err) + } + + template := &x509.Certificate{ + SerialNumber: serialNumber, + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + IsCA: false, + Subject: pkix.Name{Organization: []string{"Storj"}}, + } + + return template, nil +} diff --git a/vendor/storj.io/common/peertls/tlsopts/cert.go b/vendor/storj.io/common/peertls/tlsopts/cert.go new file mode 100644 index 000000000..820709b99 --- /dev/null +++ b/vendor/storj.io/common/peertls/tlsopts/cert.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package tlsopts + +const ( + // DefaultPeerCAWhitelist includes the production Storj network CAs + DefaultPeerCAWhitelist = `-----BEGIN CERTIFICATE----- +MIIBWzCCAQGgAwIBAgIRAK7f/E+PDEvB/TrUSaHxOEYwCgYIKoZIzj0EAwIwEDEO +MAwGA1UEChMFU3RvcmowIhgPMDAwMTAxMDEwMDAwMDBaGA8wMDAxMDEwMTAwMDAw +MFowEDEOMAwGA1UEChMFU3RvcmowWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATr +sDBAh7sr9eVZJUIFb79WK2qTcSKw/sP95JF5rCIJ5FvvwA/cx70VdW6IQjVhIaDY +llQONAD90PeoOpqSyo+iozgwNjAOBgNVHQ8BAf8EBAMCAgQwEwYDVR0lBAwwCgYI +KwYBBQUHAwEwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAgNIADBFAiEAzPdn +5ZK9hIUm+0b7iBHfk1T/O7gpwGTmsSLps4cF6KgCIDhgQ4g2givMj5Khmuhnr/e7 +z6HlDVf3PJOQv1yZqg7W +-----END CERTIFICATE-----` +) diff --git a/vendor/storj.io/common/peertls/tlsopts/config.go b/vendor/storj.io/common/peertls/tlsopts/config.go new file mode 100644 index 000000000..115d358df --- /dev/null +++ b/vendor/storj.io/common/peertls/tlsopts/config.go @@ -0,0 +1,17 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package tlsopts + +import ( + "storj.io/common/peertls/extensions" +) + +// Config holds tls configuration parameters +type Config struct { + RevocationDBURL string `default:"bolt://$CONFDIR/revocations.db" help:"url for revocation database (e.g. bolt://some.db OR redis://127.0.0.1:6378?db=2&password=abc123)"` + PeerCAWhitelistPath string `help:"path to the CA cert whitelist (peer identities must be signed by one these to be verified). 
this will override the default peer whitelist"` + UsePeerCAWhitelist bool `devDefault:"false" releaseDefault:"true" help:"if true, uses peer ca whitelist checking"` + PeerIDVersions string `default:"latest" help:"identity version(s) the server will be allowed to talk to"` + Extensions extensions.Config +} diff --git a/vendor/storj.io/common/peertls/tlsopts/doc.go b/vendor/storj.io/common/peertls/tlsopts/doc.go new file mode 100644 index 000000000..4db784d37 --- /dev/null +++ b/vendor/storj.io/common/peertls/tlsopts/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package tlsopts handles TLS server options. +package tlsopts diff --git a/vendor/storj.io/common/peertls/tlsopts/options.go b/vendor/storj.io/common/peertls/tlsopts/options.go new file mode 100644 index 000000000..a2af7d5d1 --- /dev/null +++ b/vendor/storj.io/common/peertls/tlsopts/options.go @@ -0,0 +1,194 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package tlsopts + +import ( + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "io/ioutil" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/identity" + "storj.io/common/peertls" + "storj.io/common/peertls/extensions" + "storj.io/common/pkcrypto" +) + +var ( + mon = monkit.Package() + // Error is error for tlsopts + Error = errs.Class("tlsopts error") +) + +// Options holds config, identity, and peer verification function data for use with tls. +type Options struct { + Config Config + Ident *identity.FullIdentity + RevDB extensions.RevocationDB + PeerCAWhitelist []*x509.Certificate + VerificationFuncs *VerificationFuncs + Cert *tls.Certificate +} + +// VerificationFuncs keeps track of of client and server peer certificate verification +// functions for use in tls handshakes. 
+type VerificationFuncs struct { + client []peertls.PeerCertVerificationFunc + server []peertls.PeerCertVerificationFunc +} + +// ExtensionMap maps `pkix.Extension`s to their respective asn1 object ID string. +type ExtensionMap map[string]pkix.Extension + +// NewOptions is a constructor for `tls options` given an identity, config, and +// revocation DB. A caller may pass a nil revocation DB if the revocation +// extension is disabled. +func NewOptions(i *identity.FullIdentity, c Config, revocationDB extensions.RevocationDB) (*Options, error) { + opts := &Options{ + Config: c, + RevDB: revocationDB, + Ident: i, + VerificationFuncs: new(VerificationFuncs), + } + + err := opts.configure() + if err != nil { + return nil, err + } + + return opts, nil +} + +// NewExtensionsMap builds an `ExtensionsMap` from the extensions in the passed certificate(s). +func NewExtensionsMap(chain ...*x509.Certificate) ExtensionMap { + extensionMap := make(ExtensionMap) + for _, cert := range chain { + for _, ext := range cert.Extensions { + extensionMap[ext.Id.String()] = ext + } + } + return extensionMap +} + +// ExtensionOptions converts options for use in extension handling. +func (opts *Options) ExtensionOptions() *extensions.Options { + return &extensions.Options{ + PeerCAWhitelist: opts.PeerCAWhitelist, + RevocationDB: opts.RevDB, + PeerIDVersions: opts.Config.PeerIDVersions, + } +} + +// configure adds peer certificate verification functions and data structures +// required for completing TLS handshakes to the options. 
+func (opts *Options) configure() (err error) { + if opts.Config.UsePeerCAWhitelist { + whitelist := []byte(DefaultPeerCAWhitelist) + if opts.Config.PeerCAWhitelistPath != "" { + whitelist, err = ioutil.ReadFile(opts.Config.PeerCAWhitelistPath) + if err != nil { + return Error.New("unable to find whitelist file %v: %v", opts.Config.PeerCAWhitelistPath, err) + } + } + opts.PeerCAWhitelist, err = pkcrypto.CertsFromPEM(whitelist) + if err != nil { + return Error.Wrap(err) + } + opts.VerificationFuncs.ClientAdd(peertls.VerifyCAWhitelist(opts.PeerCAWhitelist)) + } + + handlers := make(extensions.HandlerFactories, len(extensions.DefaultHandlers)) + copy(handlers, extensions.DefaultHandlers) + + if opts.Config.Extensions.Revocation { + handlers.Register( + extensions.RevocationCheckHandler, + extensions.RevocationUpdateHandler, + ) + } + + if opts.Config.Extensions.WhitelistSignedLeaf { + handlers.Register(extensions.CAWhitelistSignedLeafHandler) + } + + opts.handleExtensions(handlers) + + opts.Cert, err = peertls.TLSCert(opts.Ident.RawChain(), opts.Ident.Leaf, opts.Ident.Key) + return err +} + +// handleExtensions combines and wraps all extension handler functions into a peer +// certificate verification function. This allows extension handling via the +// `VerifyPeerCertificate` field in a `tls.Config` during a TLS handshake. +func (opts *Options) handleExtensions(handlers extensions.HandlerFactories) { + if len(handlers) == 0 { + return + } + + handlerFuncMap := handlers.WithOptions(opts.ExtensionOptions()) + + combinedHandlerFunc := func(_ [][]byte, parsedChains [][]*x509.Certificate) error { + extensionMap := NewExtensionsMap(parsedChains[0]...) + return extensionMap.HandleExtensions(handlerFuncMap, parsedChains) + } + + opts.VerificationFuncs.Add(combinedHandlerFunc) +} + +// HandleExtensions calls each `extensions.HandlerFunc` with its respective extension +// and the certificate chain where its object ID string matches the extension's. 
+func (extensionMap ExtensionMap) HandleExtensions(handlerFuncMap extensions.HandlerFuncMap, chain [][]*x509.Certificate) error { + for idStr, extension := range extensionMap { + for id, handlerFunc := range handlerFuncMap { + if idStr == id.String() { + err := handlerFunc(extension, chain) + if err != nil { + return Error.Wrap(err) + } + } + } + } + return nil +} + +// Client returns the client verification functions. +func (vf *VerificationFuncs) Client() []peertls.PeerCertVerificationFunc { + return vf.client +} + +// Server returns the server verification functions. +func (vf *VerificationFuncs) Server() []peertls.PeerCertVerificationFunc { + return vf.server +} + +// Add adds verification functions to the client and server lists. +func (vf *VerificationFuncs) Add(verificationFuncs ...peertls.PeerCertVerificationFunc) { + vf.ClientAdd(verificationFuncs...) + vf.ServerAdd(verificationFuncs...) +} + +// ClientAdd adds verification functions to the client list. +func (vf *VerificationFuncs) ClientAdd(verificationFuncs ...peertls.PeerCertVerificationFunc) { + verificationFuncs = removeNils(verificationFuncs) + vf.client = append(vf.client, verificationFuncs...) +} + +// ServerAdd adds verification functions to the server list. +func (vf *VerificationFuncs) ServerAdd(verificationFuncs ...peertls.PeerCertVerificationFunc) { + verificationFuncs = removeNils(verificationFuncs) + vf.server = append(vf.server, verificationFuncs...) +} + +func removeNils(verificationFuncs []peertls.PeerCertVerificationFunc) []peertls.PeerCertVerificationFunc { + result := verificationFuncs[:0] + for _, f := range verificationFuncs { + if f != nil { + result = append(result, f) + } + } + return result +} diff --git a/vendor/storj.io/common/peertls/tlsopts/tls.go b/vendor/storj.io/common/peertls/tlsopts/tls.go new file mode 100644 index 000000000..7e6b0a87f --- /dev/null +++ b/vendor/storj.io/common/peertls/tlsopts/tls.go @@ -0,0 +1,109 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package tlsopts + +import ( + "crypto/tls" + "crypto/x509" + "strings" + + "storj.io/common/identity" + "storj.io/common/peertls" + "storj.io/common/storj" +) + +// ServerTLSConfig returns a TLSConfig for use as a server in handshaking with a peer. +func (opts *Options) ServerTLSConfig() *tls.Config { + return opts.tlsConfig(true) +} + +// ClientTLSConfig returns a TLSConfig for use as a client in handshaking with a peer. +func (opts *Options) ClientTLSConfig(id storj.NodeID) *tls.Config { + return opts.tlsConfig(false, verifyIdentity(id)) +} + +// ClientTLSConfigPrefix returns a TLSConfig for use as a client in handshaking with a peer. +// The peer node id is validated to match the given prefix +func (opts *Options) ClientTLSConfigPrefix(idPrefix string) *tls.Config { + return opts.tlsConfig(false, verifyIdentityPrefix(idPrefix)) +} + +// UnverifiedClientTLSConfig returns a TLSConfig for use as a client in handshaking with +// an unknown peer. +func (opts *Options) UnverifiedClientTLSConfig() *tls.Config { + return opts.tlsConfig(false) +} + +func (opts *Options) tlsConfig(isServer bool, verificationFuncs ...peertls.PeerCertVerificationFunc) *tls.Config { + verificationFuncs = append( + []peertls.PeerCertVerificationFunc{ + peertls.VerifyPeerCertChains, + }, + verificationFuncs..., + ) + + switch isServer { + case true: + verificationFuncs = append( + verificationFuncs, + opts.VerificationFuncs.server..., + ) + case false: + verificationFuncs = append( + verificationFuncs, + opts.VerificationFuncs.client..., + ) + } + + /* #nosec G402 */ // We don't use trusted root certificates, since storage + // nodes might not have a CA signed certificate. 
We use node id-s for the + // verification instead, that's why we enable InsecureSkipVerify + config := &tls.Config{ + Certificates: []tls.Certificate{*opts.Cert}, + InsecureSkipVerify: true, + MinVersion: tls.VersionTLS12, + DynamicRecordSizingDisabled: true, // always start with big records + VerifyPeerCertificate: peertls.VerifyPeerFunc( + verificationFuncs..., + ), + } + + if isServer { + config.ClientAuth = tls.RequireAnyClientCert + } + + return config +} + +func verifyIdentity(id storj.NodeID) peertls.PeerCertVerificationFunc { + return func(_ [][]byte, parsedChains [][]*x509.Certificate) (err error) { + defer mon.TaskNamed("verifyIdentity")(nil)(&err) + peer, err := identity.PeerIdentityFromChain(parsedChains[0]) + if err != nil { + return err + } + + if peer.ID.String() != id.String() { + return Error.New("peer ID did not match requested ID") + } + + return nil + } +} + +func verifyIdentityPrefix(idPrefix string) peertls.PeerCertVerificationFunc { + return func(_ [][]byte, parsedChains [][]*x509.Certificate) (err error) { + defer mon.TaskNamed("verifyIdentityPrefix")(nil)(&err) + peer, err := identity.PeerIdentityFromChain(parsedChains[0]) + if err != nil { + return err + } + + if !strings.HasPrefix(peer.ID.String(), idPrefix) { + return Error.New("peer ID did not match requested ID prefix") + } + + return nil + } +} diff --git a/vendor/storj.io/common/peertls/utils.go b/vendor/storj.io/common/peertls/utils.go new file mode 100644 index 000000000..eb0d5df88 --- /dev/null +++ b/vendor/storj.io/common/peertls/utils.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package peertls + +// Many cryptography standards use ASN.1 to define their data structures, +// and Distinguished Encoding Rules (DER) to serialize those structures. +// Because DER produces binary output, it can be challenging to transmit +// the resulting files through systems, like electronic mail, that only +// support ASCII. 
The PEM format solves this problem by encoding the +// binary data using base64. +// (see https://en.wikipedia.org/wiki/Privacy-enhanced_Electronic_Mail) + +import ( + "crypto" + "crypto/rand" + "crypto/sha256" + "crypto/x509" + "math/big" + + "github.com/zeebo/errs" + + "storj.io/common/pkcrypto" +) + +// NonTemporaryError is an error with a `Temporary` method which always returns false. +// It is intended for use with grpc. +// +// (see https://godoc.org/google.golang.org/grpc#WithDialer +// and https://godoc.org/google.golang.org/grpc#FailOnNonTempDialError). +type NonTemporaryError struct{ error } + +// NewNonTemporaryError returns a new temporary error for use with grpc. +func NewNonTemporaryError(err error) NonTemporaryError { + return NonTemporaryError{ + error: errs.Wrap(err), + } +} + +// DoubleSHA256PublicKey returns the hash of the hash of (double-hash, SHA226) +// the binary format of the given public key. +func DoubleSHA256PublicKey(k crypto.PublicKey) ([sha256.Size]byte, error) { + kb, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return [sha256.Size]byte{}, err + } + mid := sha256.Sum256(kb) + end := sha256.Sum256(mid[:]) + return end, nil +} + +// Temporary returns false to indicate that is is a non-temporary error +func (nte NonTemporaryError) Temporary() bool { + return false +} + +// Err returns the underlying error +func (nte NonTemporaryError) Err() error { + return nte.error +} + +func verifyChainSignatures(certs []*x509.Certificate) error { + for i, cert := range certs { + j := len(certs) + if i+1 < j { + err := verifyCertSignature(certs[i+1], cert) + if err != nil { + return ErrVerifyCertificateChain.Wrap(err) + } + + continue + } + + err := verifyCertSignature(cert, cert) + if err != nil { + return ErrVerifyCertificateChain.Wrap(err) + } + + } + + return nil +} + +func verifyCertSignature(parentCert, childCert *x509.Certificate) error { + return pkcrypto.HashAndVerifySignature(parentCert.PublicKey, childCert.RawTBSCertificate, 
childCert.Signature) +} + +func newSerialNumber() (*big.Int, error) { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, errs.New("failed to generateServerTls serial number: %s", err.Error()) + } + + return serialNumber, nil +} diff --git a/vendor/storj.io/common/pkcrypto/common.go b/vendor/storj.io/common/pkcrypto/common.go new file mode 100644 index 000000000..9a78cd65f --- /dev/null +++ b/vendor/storj.io/common/pkcrypto/common.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package pkcrypto + +import ( + "github.com/zeebo/errs" +) + +const ( + // BlockLabelEcPrivateKey is the value to define a block label of EC private key + // (which is used here only for backwards compatibility). Use a general PKCS#8 + // encoding instead. + BlockLabelEcPrivateKey = "EC PRIVATE KEY" + // BlockLabelPrivateKey is the value to define a block label of general private key + // (used for PKCS#8-encoded private keys of type RSA, ECDSA, and others). + BlockLabelPrivateKey = "PRIVATE KEY" + // BlockLabelPublicKey is the value to define a block label of general public key + // (used for PKIX-encoded public keys of type RSA, ECDSA, and others). + BlockLabelPublicKey = "PUBLIC KEY" + // BlockLabelCertificate is the value to define a block label of certificates + BlockLabelCertificate = "CERTIFICATE" + // BlockLabelExtension is the value to define a block label of certificate extensions + BlockLabelExtension = "EXTENSION" +) + +var ( + // ErrUnsupportedKey is used when key type is not supported. + ErrUnsupportedKey = errs.Class("unsupported key type") + // ErrParse is used when an error occurs while parsing a certificate or key. + ErrParse = errs.Class("unable to parse") + // ErrSign is used when something goes wrong while generating a signature. 
+ ErrSign = errs.Class("unable to generate signature") + // ErrVerifySignature is used when a signature verification error occurs. + ErrVerifySignature = errs.Class("signature verification error") + // ErrChainLength is used when the length of a cert chain isn't what was expected + ErrChainLength = errs.Class("cert chain length error") +) diff --git a/vendor/storj.io/common/pkcrypto/doc.go b/vendor/storj.io/common/pkcrypto/doc.go new file mode 100644 index 000000000..c346a6b90 --- /dev/null +++ b/vendor/storj.io/common/pkcrypto/doc.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +/*Package pkcrypto contains a set of helper functions and constants to perform +common cryptographic operations like: + +* Signing and verification + +* Public and private key generation + +* Certification generation + +*/ +package pkcrypto diff --git a/vendor/storj.io/common/pkcrypto/encoding.go b/vendor/storj.io/common/pkcrypto/encoding.go new file mode 100644 index 000000000..ba1bb30c7 --- /dev/null +++ b/vendor/storj.io/common/pkcrypto/encoding.go @@ -0,0 +1,236 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package pkcrypto + +import ( + "crypto" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "io" + "math/big" + + "github.com/zeebo/errs" +) + +// WritePublicKeyPEM writes the public key, in a PEM-enveloped +// PKIX form. +func WritePublicKeyPEM(w io.Writer, key crypto.PublicKey) error { + kb, err := PublicKeyToPKIX(key) + if err != nil { + return err + } + err = pem.Encode(w, &pem.Block{Type: BlockLabelPublicKey, Bytes: kb}) + return errs.Wrap(err) +} + +// PublicKeyToPEM encodes a public key to a PEM-enveloped PKIX form. 
+func PublicKeyToPEM(key crypto.PublicKey) ([]byte, error) { + kb, err := PublicKeyToPKIX(key) + if err != nil { + return nil, err + } + return pem.EncodeToMemory(&pem.Block{Type: BlockLabelPublicKey, Bytes: kb}), nil +} + +// PublicKeyToPKIX serializes a public key to a PKIX-encoded form. +func PublicKeyToPKIX(key crypto.PublicKey) ([]byte, error) { + return x509.MarshalPKIXPublicKey(key) +} + +// PublicKeyFromPKIX parses a public key from its PKIX encoding. +func PublicKeyFromPKIX(pkixData []byte) (crypto.PublicKey, error) { + return x509.ParsePKIXPublicKey(pkixData) +} + +// PublicKeyFromPEM parses a public key from its PEM-enveloped PKIX +// encoding. +func PublicKeyFromPEM(pemData []byte) (crypto.PublicKey, error) { + pb, _ := pem.Decode(pemData) + if pb == nil { + return nil, ErrParse.New("could not parse PEM encoding") + } + if pb.Type != BlockLabelPublicKey { + return nil, ErrParse.New("can not parse public key from PEM block labeled %q", pb.Type) + } + return PublicKeyFromPKIX(pb.Bytes) +} + +// WritePrivateKeyPEM writes the private key to the writer, in a PEM-enveloped +// PKCS#8 form. +func WritePrivateKeyPEM(w io.Writer, key crypto.PrivateKey) error { + kb, err := PrivateKeyToPKCS8(key) + if err != nil { + return errs.Wrap(err) + } + err = pem.Encode(w, &pem.Block{Type: BlockLabelPrivateKey, Bytes: kb}) + return errs.Wrap(err) +} + +// PrivateKeyToPEM serializes a private key to a PEM-enveloped PKCS#8 form. +func PrivateKeyToPEM(key crypto.PrivateKey) ([]byte, error) { + kb, err := PrivateKeyToPKCS8(key) + if err != nil { + return nil, errs.Wrap(err) + } + return pem.EncodeToMemory(&pem.Block{Type: BlockLabelPrivateKey, Bytes: kb}), nil +} + +// PrivateKeyToPKCS8 serializes a private key to a PKCS#8-encoded form. +func PrivateKeyToPKCS8(key crypto.PrivateKey) ([]byte, error) { + return x509.MarshalPKCS8PrivateKey(key) +} + +// PrivateKeyFromPKCS8 parses a private key from its PKCS#8 encoding. 
+func PrivateKeyFromPKCS8(keyBytes []byte) (crypto.PrivateKey, error) { + key, err := x509.ParsePKCS8PrivateKey(keyBytes) + if err != nil { + return nil, err + } + return crypto.PrivateKey(key), nil +} + +// PrivateKeyFromPEM parses a private key from its PEM-enveloped PKCS#8 +// encoding. +func PrivateKeyFromPEM(keyBytes []byte) (crypto.PrivateKey, error) { + pb, _ := pem.Decode(keyBytes) + if pb == nil { + return nil, ErrParse.New("could not parse PEM encoding") + } + switch pb.Type { + case BlockLabelEcPrivateKey: + return ecPrivateKeyFromASN1(pb.Bytes) + case BlockLabelPrivateKey: + return PrivateKeyFromPKCS8(pb.Bytes) + } + return nil, ErrParse.New("can not parse private key from PEM block labeled %q", pb.Type) +} + +// WriteCertPEM writes the certificate to the writer, in a PEM-enveloped DER +// encoding. +func WriteCertPEM(w io.Writer, certs ...*x509.Certificate) error { + if len(certs) == 0 { + return errs.New("no certs to encode") + } + encodeErrs := new(errs.Group) + for _, cert := range certs { + encodeErrs.Add(pem.Encode(w, &pem.Block{Type: BlockLabelCertificate, Bytes: cert.Raw})) + } + return encodeErrs.Err() +} + +// CertToPEM returns the bytes of the certificate, in a PEM-enveloped DER +// encoding. +func CertToPEM(cert *x509.Certificate) []byte { + return pem.EncodeToMemory(&pem.Block{Type: BlockLabelCertificate, Bytes: cert.Raw}) +} + +// CertToDER returns the bytes of the certificate, in a DER encoding. +// +// Note that this is fairly useless, as x509.Certificate objects are always +// supposed to have a member containing the raw DER encoding. But this is +// included for completeness with the rest of this module's API. +func CertToDER(cert *x509.Certificate) ([]byte, error) { + return cert.Raw, nil +} + +// CertFromDER parses an X.509 certificate from its DER encoding. 
+func CertFromDER(certDER []byte) (*x509.Certificate, error) { + return x509.ParseCertificate(certDER) +} + +// CertFromPEM parses an X.509 certificate from its PEM-enveloped DER encoding. +func CertFromPEM(certPEM []byte) (*x509.Certificate, error) { + kb, _ := pem.Decode(certPEM) + if kb == nil { + return nil, ErrParse.New("could not decode certificate as PEM") + } + if kb.Type != BlockLabelCertificate { + return nil, ErrParse.New("can not parse certificate from PEM block labeled %q", kb.Type) + } + return CertFromDER(kb.Bytes) +} + +// CertsFromDER parses an x509 certificate from each of the given byte +// slices, which should be encoded in DER. +func CertsFromDER(rawCerts [][]byte) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, len(rawCerts)) + for i, c := range rawCerts { + var err error + certs[i], err = CertFromDER(c) + if err != nil { + return nil, ErrParse.New("unable to parse certificate at index %d", i) + } + } + return certs, nil +} + +// CertsFromPEM parses a PEM chain from a single byte string (the PEM-enveloped +// certificates should be concatenated). The PEM blocks may include PKIX +// extensions. 
+func CertsFromPEM(pemBytes []byte) ([]*x509.Certificate, error) { + var ( + encChain encodedChain + blockErrs errs.Group + ) + for { + var pemBlock *pem.Block + pemBlock, pemBytes = pem.Decode(pemBytes) + if pemBlock == nil { + break + } + if pemBlock.Type == BlockLabelCertificate { + encChain.AddCert(pemBlock.Bytes) + } + } + if err := blockErrs.Err(); err != nil { + return nil, err + } + + return encChain.Parse() +} + +type encodedChain struct { + chain [][]byte +} + +func (e *encodedChain) AddCert(b []byte) { + e.chain = append(e.chain, b) +} + +func (e *encodedChain) Parse() ([]*x509.Certificate, error) { + chain, err := CertsFromDER(e.chain) + if err != nil { + return nil, err + } + + return chain, nil +} + +type ecdsaSignature struct { + R, S *big.Int +} + +func marshalECDSASignature(r, s *big.Int) ([]byte, error) { + return asn1.Marshal(ecdsaSignature{R: r, S: s}) +} + +func unmarshalECDSASignature(signatureBytes []byte) (r, s *big.Int, err error) { + var signature ecdsaSignature + if _, err = asn1.Unmarshal(signatureBytes, &signature); err != nil { + return nil, nil, err + } + return signature.R, signature.S, nil +} + +// ecPrivateKeyFromASN1 parses a private key from the special Elliptic Curve +// Private Key ASN.1 structure. This is here only for backward compatibility. +// Use PKCS#8 instead. +func ecPrivateKeyFromASN1(privKeyData []byte) (crypto.PrivateKey, error) { + key, err := x509.ParseECPrivateKey(privKeyData) + if err != nil { + return nil, err + } + return crypto.PrivateKey(key), nil +} diff --git a/vendor/storj.io/common/pkcrypto/hashing.go b/vendor/storj.io/common/pkcrypto/hashing.go new file mode 100644 index 000000000..815a73e65 --- /dev/null +++ b/vendor/storj.io/common/pkcrypto/hashing.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package pkcrypto + +import ( + "hash" + + sha256 "github.com/minio/sha256-simd" +) + +// NewHash returns default hash in storj. 
+func NewHash() hash.Hash { + return sha256.New() +} + +// SHA256Hash calculates the SHA256 hash of the input data +func SHA256Hash(data []byte) []byte { + sum := sha256.Sum256(data) + return sum[:] +} diff --git a/vendor/storj.io/common/pkcrypto/signing.go b/vendor/storj.io/common/pkcrypto/signing.go new file mode 100644 index 000000000..a4c4466cf --- /dev/null +++ b/vendor/storj.io/common/pkcrypto/signing.go @@ -0,0 +1,172 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package pkcrypto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" + "reflect" +) + +const ( + // StorjPSSSaltLength holds the correct value for the PSS salt length + // when signing with RSA in Storj code and verifying RSA signatures + // from Storj. + StorjPSSSaltLength = rsa.PSSSaltLengthAuto + + // StorjRSAKeyBits holds the number of bits to use for new RSA keys + // by default. + StorjRSAKeyBits = 2048 +) + +var ( + authECCurve = elliptic.P256() + + pssParams = rsa.PSSOptions{ + SaltLength: StorjPSSSaltLength, + Hash: crypto.SHA256, + } +) + +// GeneratePrivateKey returns a new PrivateKey for signing messages +func GeneratePrivateKey() (crypto.PrivateKey, error) { + return GeneratePrivateECDSAKey(authECCurve) + // return GeneratePrivateRSAKey(StorjRSAKeyBits) +} + +// GeneratePrivateECDSAKey returns a new private ECDSA key for signing messages +func GeneratePrivateECDSAKey(curve elliptic.Curve) (*ecdsa.PrivateKey, error) { + return ecdsa.GenerateKey(curve, rand.Reader) +} + +// GeneratePrivateRSAKey returns a new private RSA key for signing messages +func GeneratePrivateRSAKey(bits int) (*rsa.PrivateKey, error) { + return rsa.GenerateKey(rand.Reader, bits) +} + +// HashAndVerifySignature checks that signature was made by the private key +// corresponding to the given public key, over a SHA-256 digest of the given +// data. It returns an error if verification fails, or nil otherwise. 
+func HashAndVerifySignature(key crypto.PublicKey, data, signature []byte) error { + digest := SHA256Hash(data) + return VerifySignatureWithoutHashing(key, digest, signature) +} + +// VerifySignatureWithoutHashing checks the signature against the passed data +// (which is normally a digest) and public key. It returns an error if +// verification fails, or nil otherwise. +func VerifySignatureWithoutHashing(pubKey crypto.PublicKey, digest, signature []byte) error { + switch key := pubKey.(type) { + case *ecdsa.PublicKey: + return verifyECDSASignatureWithoutHashing(key, digest, signature) + case *rsa.PublicKey: + return verifyRSASignatureWithoutHashing(key, digest, signature) + } + return ErrUnsupportedKey.New("%T", pubKey) +} + +func verifyECDSASignatureWithoutHashing(pubKey *ecdsa.PublicKey, digest, signatureBytes []byte) error { + r, s, err := unmarshalECDSASignature(signatureBytes) + if err != nil { + return ErrVerifySignature.New("unable to unmarshal ecdsa signature: %v", err) + } + if !ecdsa.Verify(pubKey, digest, r, s) { + return ErrVerifySignature.New("signature is not valid") + } + return nil +} + +func verifyRSASignatureWithoutHashing(pubKey *rsa.PublicKey, digest, signatureBytes []byte) error { + err := rsa.VerifyPSS(pubKey, pssParams.Hash, digest, signatureBytes, &pssParams) + if err != nil { + return ErrVerifySignature.New("signature is not valid") + } + return nil +} + +// PublicKeyFromPrivate returns the public key corresponding to a given private +// key. +// It returns an error if the key isn't of an accepted implementation. +func PublicKeyFromPrivate(privKey crypto.PrivateKey) (crypto.PublicKey, error) { + switch key := privKey.(type) { + case *ecdsa.PrivateKey: + return key.Public(), nil + case *rsa.PrivateKey: + return key.Public(), nil + } + return nil, ErrUnsupportedKey.New("%T", privKey) +} + +// SignWithoutHashing signs the given digest with the private key and returns +// the new signature. 
+func SignWithoutHashing(privKey crypto.PrivateKey, digest []byte) ([]byte, error) { + switch key := privKey.(type) { + case *ecdsa.PrivateKey: + return signECDSAWithoutHashing(key, digest) + case *rsa.PrivateKey: + return signRSAWithoutHashing(key, digest) + } + return nil, ErrUnsupportedKey.New("%T", privKey) +} + +func signECDSAWithoutHashing(privKey *ecdsa.PrivateKey, digest []byte) ([]byte, error) { + r, s, err := ecdsa.Sign(rand.Reader, privKey, digest) + if err != nil { + return nil, ErrSign.Wrap(err) + } + return marshalECDSASignature(r, s) +} + +func signRSAWithoutHashing(privKey *rsa.PrivateKey, digest []byte) ([]byte, error) { + return privKey.Sign(rand.Reader, digest, &pssParams) +} + +// HashAndSign signs a SHA-256 digest of the given data and returns the new +// signature. +func HashAndSign(key crypto.PrivateKey, data []byte) ([]byte, error) { + digest := SHA256Hash(data) + signature, err := SignWithoutHashing(key, digest) + if err != nil { + return nil, ErrSign.Wrap(err) + } + return signature, nil +} + +// PublicKeyEqual returns true if two public keys are the same. +func PublicKeyEqual(a, b crypto.PublicKey) bool { + switch aConcrete := a.(type) { + case *ecdsa.PublicKey: + bConcrete, ok := b.(*ecdsa.PublicKey) + if !ok { + return false + } + return publicECDSAKeyEqual(aConcrete, bConcrete) + case *rsa.PublicKey: + bConcrete, ok := b.(*rsa.PublicKey) + if !ok { + return false + } + return publicRSAKeyEqual(aConcrete, bConcrete) + } + // a best-effort here is probably better than adding an err return + return reflect.DeepEqual(a, b) +} + +// publicECDSAKeyEqual returns true if two ECDSA public keys are the same. +func publicECDSAKeyEqual(a, b *ecdsa.PublicKey) bool { + return a.Curve == b.Curve && bigIntEq(a.X, b.X) && bigIntEq(a.Y, b.Y) +} + +// publicRSAKeyEqual returns true if two RSA public keys are the same. 
+func publicRSAKeyEqual(a, b *rsa.PublicKey) bool { + return bigIntEq(a.N, b.N) && a.E == b.E +} + +func bigIntEq(a, b *big.Int) bool { + return a.Cmp(b) == 0 +} diff --git a/vendor/storj.io/common/ranger/common.go b/vendor/storj.io/common/ranger/common.go new file mode 100644 index 000000000..98c25f790 --- /dev/null +++ b/vendor/storj.io/common/ranger/common.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package ranger + +import ( + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" +) + +// Error is the errs class of standard Ranger errors +var Error = errs.Class("ranger error") + +var mon = monkit.Package() diff --git a/vendor/storj.io/common/ranger/doc.go b/vendor/storj.io/common/ranger/doc.go new file mode 100644 index 000000000..4979934b9 --- /dev/null +++ b/vendor/storj.io/common/ranger/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package ranger implements lazy io.Reader and io.Writer interfaces. +package ranger diff --git a/vendor/storj.io/common/ranger/file.go b/vendor/storj.io/common/ranger/file.go new file mode 100644 index 000000000..71b145035 --- /dev/null +++ b/vendor/storj.io/common/ranger/file.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package ranger + +import ( + "context" + "io" + "os" + + "github.com/zeebo/errs" +) + +type fileRanger struct { + path string + size int64 +} + +// FileRanger returns a Ranger from a path. 
+func FileRanger(path string) (Ranger, error) { + info, err := os.Stat(path) + if err != nil { + return nil, Error.Wrap(err) + } + return &fileRanger{path: path, size: info.Size()}, nil +} + +func (rr *fileRanger) Size() int64 { + return rr.size +} + +func (rr *fileRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + if offset < 0 { + return nil, Error.New("negative offset") + } + if length < 0 { + return nil, Error.New("negative length") + } + if offset+length > rr.size { + return nil, Error.New("range beyond end") + } + + fh, err := os.Open(rr.path) + if err != nil { + return nil, Error.Wrap(err) + } + _, err = fh.Seek(offset, io.SeekStart) + if err != nil { + return nil, Error.Wrap(errs.Combine(err, fh.Close())) + } + + return &FileReader{fh, length}, nil +} + +// FileReader implements limit reader with io.EOF only on last read. +type FileReader struct { + file *os.File + remaining int64 +} + +// Read reads from the underlying file. +func (reader *FileReader) Read(data []byte) (n int, err error) { + if reader.remaining <= 0 { + return 0, io.EOF + } + if int64(len(data)) > reader.remaining { + data = data[0:reader.remaining] + } + n, err = reader.file.Read(data) + reader.remaining -= int64(n) + if err == io.EOF && reader.remaining == 0 { + err = nil + } + return +} + +// Close closes the underlying file. +func (reader *FileReader) Close() error { + return reader.file.Close() +} diff --git a/vendor/storj.io/common/ranger/reader.go b/vendor/storj.io/common/ranger/reader.go new file mode 100644 index 000000000..14a3d3b04 --- /dev/null +++ b/vendor/storj.io/common/ranger/reader.go @@ -0,0 +1,117 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package ranger + +import ( + "bytes" + "context" + "io" + "io/ioutil" + + "storj.io/common/readcloser" +) + +// A Ranger is a flexible data stream type that allows for more effective +// pipelining during seeking. 
A Ranger can return multiple parallel Readers for +// any subranges. +type Ranger interface { + Size() int64 + Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) +} + +// ByteRanger turns a byte slice into a Ranger +type ByteRanger []byte + +// Size implements Ranger.Size +func (b ByteRanger) Size() int64 { return int64(len(b)) } + +// Range implements Ranger.Range +func (b ByteRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + if offset < 0 { + return nil, Error.New("negative offset") + } + if length < 0 { + return nil, Error.New("negative length") + } + if offset+length > int64(len(b)) { + return nil, Error.New("buffer runoff") + } + + return ioutil.NopCloser(bytes.NewReader(b[offset : offset+length])), nil +} + +type concatReader struct { + r1 Ranger + r2 Ranger +} + +func (c *concatReader) Size() int64 { + return c.r1.Size() + c.r2.Size() +} + +func (c *concatReader) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + r1Size := c.r1.Size() + if offset+length <= r1Size { + return c.r1.Range(ctx, offset, length) + } + if offset >= r1Size { + return c.r2.Range(ctx, offset-r1Size, length) + } + r1Range, err := c.r1.Range(ctx, offset, r1Size-offset) + if err != nil { + return nil, err + } + return readcloser.MultiReadCloser( + r1Range, + readcloser.LazyReadCloser(func() (io.ReadCloser, error) { + return c.r2.Range(ctx, 0, length-(r1Size-offset)) + })), nil +} + +func concat2(r1, r2 Ranger) Ranger { + return &concatReader{r1: r1, r2: r2} +} + +// Concat concatenates Rangers +func Concat(r ...Ranger) Ranger { + switch len(r) { + case 0: + return ByteRanger(nil) + case 1: + return r[0] + case 2: + return concat2(r[0], r[1]) + default: + mid := len(r) / 2 + return concat2(Concat(r[:mid]...), Concat(r[mid:]...)) + } +} + +type subrange struct { + r Ranger + offset, length int64 +} + +// Subrange returns a subset of a 
Ranger. +func Subrange(data Ranger, offset, length int64) (Ranger, error) { + dSize := data.Size() + if offset < 0 || offset > dSize { + return nil, Error.New("invalid offset") + } + if length+offset > dSize { + return nil, Error.New("invalid length") + } + return &subrange{r: data, offset: offset, length: length}, nil +} + +func (s *subrange) Size() int64 { + return s.length +} + +func (s *subrange) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + return s.r.Range(ctx, offset+s.offset, length) +} diff --git a/vendor/storj.io/common/ranger/readerat.go b/vendor/storj.io/common/ranger/readerat.go new file mode 100644 index 000000000..cd44367ec --- /dev/null +++ b/vendor/storj.io/common/ranger/readerat.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package ranger + +import ( + "context" + "io" +) + +type readerAtRanger struct { + r io.ReaderAt + size int64 +} + +// ReaderAtRanger converts a ReaderAt with a given size to a Ranger +func ReaderAtRanger(r io.ReaderAt, size int64) Ranger { + return &readerAtRanger{ + r: r, + size: size, + } +} + +func (r *readerAtRanger) Size() int64 { + return r.size +} + +type readerAtReader struct { + r io.ReaderAt + offset, length int64 +} + +func (r *readerAtRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + if offset < 0 { + return nil, Error.New("negative offset") + } + if length < 0 { + return nil, Error.New("negative length") + } + if offset+length > r.size { + return nil, Error.New("buffer runoff") + } + return &readerAtReader{r: r.r, offset: offset, length: length}, nil +} + +func (r *readerAtReader) Read(p []byte) (n int, err error) { + if r.length == 0 { + return 0, io.EOF + } + if int64(len(p)) > r.length { + p = p[:r.length] + } + n, err = r.r.ReadAt(p, r.offset) + r.offset += int64(n) + r.length -= int64(n) + return n, err +} + +func (r 
*readerAtReader) Close() error { + return nil +} diff --git a/vendor/storj.io/common/readcloser/doc.go b/vendor/storj.io/common/readcloser/doc.go new file mode 100644 index 000000000..7cbda60a7 --- /dev/null +++ b/vendor/storj.io/common/readcloser/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package readcloser implements utilities for io.ReadClosers. +package readcloser diff --git a/vendor/storj.io/common/readcloser/fatal.go b/vendor/storj.io/common/readcloser/fatal.go new file mode 100644 index 000000000..0bd3358ff --- /dev/null +++ b/vendor/storj.io/common/readcloser/fatal.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package readcloser + +import "io" + +// FatalReadCloser returns a ReadCloser that always fails with err. +func FatalReadCloser(err error) io.ReadCloser { + return &fatalReadCloser{Err: err} +} + +type fatalReadCloser struct { + Err error +} + +func (f *fatalReadCloser) Read(p []byte) (n int, err error) { + return 0, f.Err +} + +func (f *fatalReadCloser) Close() error { + return nil +} diff --git a/vendor/storj.io/common/readcloser/lazy.go b/vendor/storj.io/common/readcloser/lazy.go new file mode 100644 index 000000000..2aab2c542 --- /dev/null +++ b/vendor/storj.io/common/readcloser/lazy.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package readcloser + +import "io" + +// LazyReadCloser returns an ReadCloser that doesn't initialize the backing +// Reader until the first Read. 
+func LazyReadCloser(reader func() (io.ReadCloser, error)) io.ReadCloser { + return &lazyReadCloser{fn: reader} +} + +type lazyReadCloser struct { + fn func() (io.ReadCloser, error) + r io.ReadCloser +} + +func (l *lazyReadCloser) Read(p []byte) (n int, err error) { + if l.r == nil { + l.r, err = l.fn() + if err != nil { + return 0, err + } + l.fn = nil + } + return l.r.Read(p) +} + +func (l *lazyReadCloser) Close() error { + if l.r != nil { + return l.r.Close() + } + return nil +} diff --git a/vendor/storj.io/common/readcloser/limit.go b/vendor/storj.io/common/readcloser/limit.go new file mode 100644 index 000000000..b4493a33d --- /dev/null +++ b/vendor/storj.io/common/readcloser/limit.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package readcloser + +import "io" + +// LimitReadCloser is a LimitReader extension that returns a ReadCloser +// that reads from r but stops with EOF after n bytes. +func LimitReadCloser(r io.ReadCloser, n int64) io.ReadCloser { + return &limitedReadCloser{io.LimitReader(r, n), r} +} + +type limitedReadCloser struct { + R io.Reader + C io.Closer +} + +func (l *limitedReadCloser) Read(p []byte) (n int, err error) { + return l.R.Read(p) +} + +func (l *limitedReadCloser) Close() error { + return l.C.Close() +} diff --git a/vendor/storj.io/common/readcloser/multi.go b/vendor/storj.io/common/readcloser/multi.go new file mode 100644 index 000000000..76779599e --- /dev/null +++ b/vendor/storj.io/common/readcloser/multi.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package readcloser + +import ( + "io" + + "github.com/zeebo/errs" +) + +type eofReadCloser struct{} + +func (eofReadCloser) Read([]byte) (int, error) { + return 0, io.EOF +} + +func (eofReadCloser) Close() error { + return nil +} + +type multiReadCloser struct { + readers []io.ReadCloser +} + +// MultiReadCloser is a MultiReader extension that returns a ReaderCloser +// that's the logical concatenation of the provided input readers. +// They're read sequentially. Once all inputs have returned EOF, +// Read will return EOF. If any of the readers return a non-nil, +// non-EOF error, Read will return that error. +func MultiReadCloser(readers ...io.ReadCloser) io.ReadCloser { + r := make([]io.ReadCloser, len(readers)) + copy(r, readers) + return &multiReadCloser{r} +} + +func (mr *multiReadCloser) Read(p []byte) (n int, err error) { + for len(mr.readers) > 0 { + // Optimization to flatten nested multiReaders. + if len(mr.readers) == 1 { + if r, ok := mr.readers[0].(*multiReadCloser); ok { + mr.readers = r.readers + continue + } + } + n, err = mr.readers[0].Read(p) + if err == io.EOF { + err = mr.readers[0].Close() + // Use eofReader instead of nil to avoid nil panic + // after performing flatten (Issue 18232). + mr.readers[0] = eofReadCloser{} // permit earlier GC + mr.readers = mr.readers[1:] + } + if n > 0 || err != io.EOF { + if err == io.EOF && len(mr.readers) > 0 { + // Don't return EOF yet. More readers remain. + err = nil + } + return + } + } + return 0, io.EOF +} + +func (mr *multiReadCloser) Close() error { + errlist := make([]error, len(mr.readers)) + for i, r := range mr.readers { + errlist[i] = r.Close() + } + return errs.Combine(errlist...) +} diff --git a/vendor/storj.io/common/rpc/common.go b/vendor/storj.io/common/rpc/common.go new file mode 100644 index 000000000..cd2935174 --- /dev/null +++ b/vendor/storj.io/common/rpc/common.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package rpc + +import ( + "net" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/memory" +) + +//go:generate go run gen.go ../pb drpc alias.go + +const ( + // IsDRPC is true if drpc is being used. + IsDRPC = true + + // IsGRPC is true if grpc is being used. + IsGRPC = false +) + +var mon = monkit.Package() + +// Error wraps all of the errors returned by this package. +var Error = errs.Class("rpccompat") + +// timedConn wraps a net.Conn so that all reads and writes get the specified timeout and +// return bytes no faster than the rate. If the timeout or rate are zero, they are +// ignored. +type timedConn struct { + net.Conn + rate memory.Size +} + +// now returns time.Now if there's a nonzero rate. +func (t *timedConn) now() (now time.Time) { + if t.rate > 0 { + now = time.Now() + } + return now +} + +// delay ensures that we sleep to keep the rate if it is nonzero. n is the number of +// bytes in the read or write operation we need to delay. +func (t *timedConn) delay(start time.Time, n int) { + if t.rate > 0 { + expected := time.Duration(n * int(time.Second) / t.rate.Int()) + if actual := time.Since(start); expected > actual { + time.Sleep(expected - actual) + } + } +} + +// Read wraps the connection read and adds sleeping to ensure the rate. +func (t *timedConn) Read(p []byte) (int, error) { + start := t.now() + n, err := t.Conn.Read(p) + t.delay(start, n) + return n, err +} + +// Write wraps the connection write and adds sleeping to ensure the rate. +func (t *timedConn) Write(p []byte) (int, error) { + start := t.now() + n, err := t.Conn.Write(p) + t.delay(start, n) + return n, err +} diff --git a/vendor/storj.io/common/rpc/conn.go b/vendor/storj.io/common/rpc/conn.go new file mode 100644 index 000000000..15214847e --- /dev/null +++ b/vendor/storj.io/common/rpc/conn.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
// Conn is a wrapper around a drpc client connection that additionally
// remembers the TLS connection state captured at dial time.
type Conn struct {
	// state is populated by the tls dial path; unencrypted dials leave it
	// as the zero value, so PeerIdentity would then see no certificates.
	state tls.ConnectionState
	drpc.Conn
}

// Close closes the connection.
func (c *Conn) Close() error { return c.Conn.Close() }

// ConnectionState returns the tls connection state.
func (c *Conn) ConnectionState() tls.ConnectionState { return c.state }

// PeerIdentity returns the peer identity on the other end of the connection,
// derived from the certificate chain presented during the tls handshake.
func (c *Conn) PeerIdentity() (*identity.PeerIdentity, error) {
	return identity.PeerIdentityFromChain(c.state.PeerCertificates)
}

// Raw returns the underlying connection.
func (c *Conn) Raw() drpc.Conn {
	return c.Conn
}
+ TLSOptions *tlsopts.Options + + // DialTimeout causes all the tcp dials to error if they take longer + // than it if it is non-zero. + DialTimeout time.Duration + + // DialLatency sleeps this amount if it is non-zero before every dial. + // The timeout runs while the sleep is happening. + DialLatency time.Duration + + // TransferRate limits all read/write operations to go slower than + // the size per second if it is non-zero. + TransferRate memory.Size + + // PoolOptions controls options for the connection pool. + PoolOptions rpcpool.Options + + // ConnectionOptions controls the options that we pass to drpc connections. + ConnectionOptions drpcconn.Options + + // TCPUserTimeout controls what setting to use for the TCP_USER_TIMEOUT + // socket option on dialed connections. Only valid on linux. Only set + // if positive. + TCPUserTimeout time.Duration +} + +// NewDefaultDialer returns a Dialer with default timeouts set. +func NewDefaultDialer(tlsOptions *tlsopts.Options) Dialer { + return Dialer{ + TLSOptions: tlsOptions, + DialTimeout: 20 * time.Second, + TCPUserTimeout: 15 * time.Minute, + PoolOptions: rpcpool.Options{ + Capacity: 5, + IdleExpiration: 2 * time.Minute, + }, + ConnectionOptions: drpcconn.Options{ + Manager: NewDefaultManagerOptions(), + }, + } +} + +// dialContext does a raw tcp dial to the address and wraps the connection with the +// provided timeout. +func (d Dialer) dialContext(ctx context.Context, address string) (net.Conn, error) { + if d.DialLatency > 0 { + timer := time.NewTimer(d.DialLatency) + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return nil, Error.Wrap(ctx.Err()) + } + } + + conn, err := new(net.Dialer).DialContext(ctx, "tcp", address) + if err != nil { + // N.B. this error is not wrapped on purpose! grpc code cares about inspecting + // it and it's not smart enough to attempt to do any unwrapping. 
// DialNode creates an rpc connection to the specified node.
//
// Unlike the plain address dials, this always verifies the remote against
// the node's id, so TLSOptions must be set.
func (d Dialer) DialNode(ctx context.Context, node *pb.Node) (_ *Conn, err error) {
	if node == nil {
		return nil, Error.New("node is nil")
	}

	// Tag the monkit task with a short node-id prefix for tracing.
	// NOTE(review): assumes the node id string is always at least 8
	// characters — confirm against storj.NodeID's encoding.
	defer mon.Task()(&ctx, "node: "+node.Id.String()[0:8])(&err)

	if d.TLSOptions == nil {
		return nil, Error.New("tls options not set when required for this dial")
	}

	return d.dial(ctx, node.GetAddress().GetAddress(), d.TLSOptions.ClientTLSConfig(node.Id))
}
The format is either: +// * node_host:node_port +// * node_id_prefix@node_host:node_port +// Examples: +// * 33.20.0.1:7777 +// * [2001:db8:1f70::999:de8:7648:6e8]:7777 +// * 12vha9oTFnerx@33.20.0.1:7777 +// * 12vha9oTFnerx@[2001:db8:1f70::999:de8:7648:6e8]:7777 +// +// DialAddressInsecureBestEffort: +// * will use a node id if provided in the nodeURL paramenter +// * will otherwise look up the node address in a known map of node address to node ids and use +// the remembered node id. +// * will otherwise dial insecurely +func (d Dialer) DialAddressInsecureBestEffort(ctx context.Context, nodeURL string) (_ *Conn, err error) { + defer mon.Task()(&ctx)(&err) + + if d.TLSOptions == nil { + return nil, Error.New("tls options not set when required for this dial") + } + + var nodeIDPrefix, nodeAddress string + parts := strings.Split(nodeURL, "@") + switch len(parts) { + default: + return nil, Error.New("malformed node url: %q", nodeURL) + case 1: + nodeAddress = parts[0] + case 2: + nodeIDPrefix, nodeAddress = parts[0], parts[1] + } + + if len(nodeIDPrefix) > 0 { + return d.dial(ctx, nodeAddress, d.TLSOptions.ClientTLSConfigPrefix(nodeIDPrefix)) + } + + if nodeID, found := KnownNodeID(nodeAddress); found { + return d.dial(ctx, nodeAddress, d.TLSOptions.ClientTLSConfig(nodeID)) + } + + zap.L().Warn(`Unknown node id for address. Specify node id in the form "node_id@node_host:node_port" for added security`, + zap.String("Address", nodeAddress), + ) + return d.dial(ctx, nodeAddress, d.TLSOptions.UnverifiedClientTLSConfig()) +} + +// DialAddressInsecure dials to the specified address and does not check the node id. 
// dial performs the dialing to the drpc endpoint with tls.
func (d Dialer) dial(ctx context.Context, address string, tlsConfig *tls.Config) (_ *Conn, err error) {
	defer mon.Task()(&ctx)(&err)

	// include the timeout here so that it includes all aspects of the dial
	if d.DialTimeout > 0 {
		var cancel func()
		ctx, cancel = context.WithTimeout(ctx, d.DialTimeout)
		defer cancel()
	}

	// The pool redials lazily with the same address and tls config whenever
	// it needs a fresh transport.
	pool := rpcpool.New(d.PoolOptions, func(ctx context.Context) (drpc.Transport, error) {
		return d.dialTransport(ctx, address, tlsConfig)
	})

	// Dial one transport eagerly so the tls connection state (and therefore
	// the peer identity) is known before the Conn is handed back.
	conn, err := d.dialTransport(ctx, address, tlsConfig)
	if err != nil {
		return nil, err
	}
	state := conn.ConnectionState()

	// Seed the pool with the connection we just made rather than discarding
	// the completed handshake.
	if err := pool.Put(drpcconn.New(conn)); err != nil {
		return nil, err
	}

	return &Conn{
		state: state,
		Conn:  rpctracing.NewTracingWrapper(pool),
	}, nil
}
// dialTransport performs dialing to the drpc endpoint with tls.
func (d Dialer) dialTransport(ctx context.Context, address string, tlsConfig *tls.Config) (_ *tlsConnWrapper, err error) {
	defer mon.Task()(&ctx)(&err)

	// open the tcp socket to the address
	rawConn, err := d.dialContext(ctx, address)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	// Prepend the drpc header to the first write so the remote knows to
	// expect drpc on the wire instead of grpc.
	rawConn = newDrpcHeaderConn(rawConn)

	// perform the handshake racing with the context closing. we use a buffer
	// of size 1 so that the handshake can proceed even if no one is reading.
	errCh := make(chan error, 1)
	conn := tls.Client(rawConn, tlsConfig)
	go func() { errCh <- conn.Handshake() }()

	// see which wins and close the raw conn if there was any error. we can't
	// close the tls connection concurrently with handshakes or it sometimes
	// will panic. cool, huh?
	select {
	case <-ctx.Done():
		err = ctx.Err()
	case err = <-errCh:
	}
	if err != nil {
		_ = rawConn.Close()
		return nil, Error.Wrap(err)
	}

	return &tlsConnWrapper{
		Conn:       conn,
		underlying: rawConn,
	}, nil
}
+func (d Dialer) dialTransportUnencrypted(ctx context.Context, address string) (_ net.Conn, err error) { + defer mon.Task()(&ctx)(&err) + + // open the tcp socket to the address + conn, err := d.dialContext(ctx, address) + if err != nil { + return nil, Error.Wrap(err) + } + + return newDrpcHeaderConn(conn), nil +} + +// tlsConnWrapper is a wrapper around a *tls.Conn that calls Close on the +// underlying connection when closed rather than trying to send a +// notification to the other side which may block forever. +type tlsConnWrapper struct { + *tls.Conn + underlying net.Conn +} + +// Close closes the underlying connection +func (t *tlsConnWrapper) Close() error { return t.underlying.Close() } + +// drpcHeaderConn fulfills the net.Conn interface. On the first call to Write +// it will write the drpcHeader. +type drpcHeaderConn struct { + net.Conn + once sync.Once +} + +// newDrpcHeaderConn returns a new *drpcHeaderConn +func newDrpcHeaderConn(conn net.Conn) *drpcHeaderConn { + return &drpcHeaderConn{ + Conn: conn, + } +} + +// Write will write buf to the underlying conn. If this is the first time Write +// is called it will prepend the drpcHeader to the beginning of the write. +func (d *drpcHeaderConn) Write(buf []byte) (n int, err error) { + var didOnce bool + d.once.Do(func() { + didOnce = true + header := []byte(drpcHeader) + n, err = d.Conn.Write(append(header, buf...)) + }) + if didOnce { + n -= len(drpcHeader) + if n < 0 { + n = 0 + } + return n, err + } + return d.Conn.Write(buf) +} diff --git a/vendor/storj.io/common/rpc/doc.go b/vendor/storj.io/common/rpc/doc.go new file mode 100644 index 000000000..e824c5f98 --- /dev/null +++ b/vendor/storj.io/common/rpc/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package rpc implements dialing on Storj Network. 
+package rpc diff --git a/vendor/storj.io/common/rpc/known_ids.go b/vendor/storj.io/common/rpc/known_ids.go new file mode 100644 index 000000000..9f7a1e78d --- /dev/null +++ b/vendor/storj.io/common/rpc/known_ids.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package rpc + +import ( + "net" + + "storj.io/common/storj" +) + +var ( + knownNodeIDs = map[string]storj.NodeID{} +) + +func init() { + // !!!! NOTE !!!! + // + // These exist for backwards compatibility. + // + // Do not add more here, any new satellite MUST use node ID, + // Adding new satellites here will break forwards compatibility. + for _, nodeURL := range []string{ + "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777", + "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@mars.tardigrade.io:7777", + "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777", + "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@saturn.tardigrade.io:7777", + "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777", + "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@jupiter.tardigrade.io:7777", + "118UWpMCHzs6CvSgWd9BfFVjw5K9pZbJjkfZJexMtSkmKxvvAW@satellite.stefan-benten.de:7777", + "1wFTAgs9DP5RSnCqKV1eLf6N9wtk4EAtmN5DpSxcs8EjT69tGE@saltlake.tardigrade.io:7777", + } { + url, err := storj.ParseNodeURL(nodeURL) + if err != nil { + panic(err) + } + knownNodeIDs[url.Address] = url.ID + host, _, err := net.SplitHostPort(url.Address) + if err != nil { + panic(err) + } + knownNodeIDs[host] = url.ID + } +} + +// KnownNodeID looks for a well-known node id for a given address +func KnownNodeID(address string) (id storj.NodeID, known bool) { + id, known = knownNodeIDs[address] + if !known { + host, _, err := net.SplitHostPort(address) + if err != nil { + return id, false + } + id, known = knownNodeIDs[host] + } + return id, known +} diff --git 
a/vendor/storj.io/common/rpc/lookup.go b/vendor/storj.io/common/rpc/lookup.go new file mode 100644 index 000000000..f0170029b --- /dev/null +++ b/vendor/storj.io/common/rpc/lookup.go @@ -0,0 +1,48 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package rpc + +import ( + "context" + "net" +) + +// LookupNodeAddress resolves a storage node address to the first IP address resolved. +// If an IP address is accidentally provided it is returned back. This function +// is used to resolve storage node IP addresses so that uplinks can use +// IP addresses directly without resolving many hosts. +func LookupNodeAddress(ctx context.Context, nodeAddress string) string { + host, port, err := net.SplitHostPort(nodeAddress) + if err != nil { + // If there was an error parsing out the port we just use a plain host. + host = nodeAddress + port = "" + } + + // We check if the address is an IP address to decide if we need to resolve it or not. + ip := net.ParseIP(host) + // nodeAddress is already an IP, so we can use that. + if ip != nil { + return nodeAddress + } + + // We have a hostname not an IP address so we should resolve the IP address + // to give back to the uplink client. + addresses, err := net.DefaultResolver.LookupHost(ctx, host) + if err != nil || len(addresses) == 0 { + // We ignore the error because if this fails for some reason we can just + // re-use the hostname, it just won't be as fast for the uplink to dial. + return nodeAddress + } + + // We return the first address found because some DNS servers already do + // round robin load balancing and we would be messing with their behaviour + // if we tried to get smart here. 
+ first := addresses[0] + + if port == "" { + return first + } + return net.JoinHostPort(first, port) +} diff --git a/vendor/storj.io/common/rpc/rpcpeer/peer.go b/vendor/storj.io/common/rpc/rpcpeer/peer.go new file mode 100644 index 000000000..6d6c0c1bc --- /dev/null +++ b/vendor/storj.io/common/rpc/rpcpeer/peer.go @@ -0,0 +1,70 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package rpcpeer implements context.Context peer tagging. +package rpcpeer + +import ( + "context" + "crypto/tls" + "net" + + "github.com/zeebo/errs" + + "storj.io/common/internal/grpchook" + "storj.io/drpc/drpcctx" +) + +// Error is the class of errors returned by this package. +var Error = errs.Class("rpcpeer") + +// Peer represents an rpc peer. +type Peer struct { + Addr net.Addr + State tls.ConnectionState +} + +// peerKey is used as a unique value for context keys. +type peerKey struct{} + +// NewContext returns a new context with the peer associated as a value. +func NewContext(ctx context.Context, peer *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, peer) +} + +// FromContext returns the peer that was previously associated by NewContext. +func FromContext(ctx context.Context) (*Peer, error) { + if peer, ok := ctx.Value(peerKey{}).(*Peer); ok { + return peer, nil + } else if peer, drpcErr := drpcInternalFromContext(ctx); drpcErr == nil { + return peer, nil + } else if addr, state, grpcErr := grpchook.InternalFromContext(ctx); grpcErr == nil { + return &Peer{Addr: addr, State: state}, nil + } else { + if grpcErr == grpchook.ErrNotHooked { + grpcErr = nil + } + return nil, errs.Combine(drpcErr, grpcErr) + } +} + +// drpcInternalFromContext returns a peer from the context using drpc. 
+func drpcInternalFromContext(ctx context.Context) (*Peer, error) { + tr, ok := drpcctx.Transport(ctx) + if !ok { + return nil, Error.New("unable to get drpc peer from context") + } + + conn, ok := tr.(interface { + RemoteAddr() net.Addr + ConnectionState() tls.ConnectionState + }) + if !ok { + return nil, Error.New("drpc transport does not have required methods") + } + + return &Peer{ + Addr: conn.RemoteAddr(), + State: conn.ConnectionState(), + }, nil +} diff --git a/vendor/storj.io/common/rpc/rpcpool/pool.go b/vendor/storj.io/common/rpc/rpcpool/pool.go new file mode 100644 index 000000000..c6d57effc --- /dev/null +++ b/vendor/storj.io/common/rpc/rpcpool/pool.go @@ -0,0 +1,210 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package rpcpool implements connection pooling for rpc. +package rpcpool + +import ( + "context" + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/drpc" + "storj.io/drpc/drpcconn" +) + +var mon = monkit.Package() + +// NOTE(jeff): conn expiration could remove the connection from the pool so +// that it doesn't take up a slot causing us to throw away a connection that +// we may want to keep. that adds quite a bit of complexity because channels +// do not support removing buffered elements, so it didn't seem worth it. + +// expiringConn wraps a connection +type expiringConn struct { + conn *drpcconn.Conn + timer *time.Timer +} + +// newExpiringConn wraps the connection with a timer that will close it after the +// specified duration. If the duration is non-positive, no timer is set. +func newExpiringConn(conn *drpcconn.Conn, dur time.Duration) *expiringConn { + ex := &expiringConn{conn: conn} + if dur > 0 { + ex.timer = time.AfterFunc(dur, func() { _ = conn.Close() }) + } + return ex +} + +// Cancel attempts to cancel the expiration timer and returns true if the +// timer will not close the connection. 
// Close closes all of the pool's connections and ensures no new ones will be made.
// Later concurrent calls block on c.done until the first call has finished
// draining the pool, and then return nil.
func (c *Conn) Close() (err error) {
	var pool chan *expiringConn

	// only one call will ever see a non-nil pool variable. additionally, anyone
	// holding the mutex will either see a nil c.pool or a non-closed c.pool.
	c.mu.Lock()
	pool, c.pool = c.pool, nil
	c.mu.Unlock()

	if pool != nil {
		// Drain and close every idle connection. Cancel reports whether the
		// idle-expiration timer was stopped in time; if it already fired,
		// the timer callback closed the connection for us.
		close(pool)
		for ex := range pool {
			if ex.Cancel() {
				err = errs.Combine(err, ex.conn.Close())
			}
		}
		// Release any concurrent Close calls waiting below.
		close(c.done)
	}

	<-c.done
	return err
}
// getConn attempts to get a pooled connection or dials a new one if necessary.
func (c *Conn) getConn(ctx context.Context) (_ *drpcconn.Conn, err error) {
	defer mon.Task()(&ctx)(&err)

	// Snapshot the pool channel under the mutex: Close may concurrently set
	// c.pool to nil, and receiving from the captured channel after Close has
	// closed it yields ok == false below.
	// NOTE(review): if Close already ran, pool is nil here; a nil channel is
	// never ready so the select falls through to dialing a new connection —
	// confirm that is the intended post-Close behavior.
	c.mu.Lock()
	pool := c.pool
	c.mu.Unlock()

	for {
		select {
		case ex, ok := <-pool:
			if !ok {
				return nil, Error.New("connection pool closed")
			}

			// if the connection died in the pool, try again
			if !ex.Cancel() || ex.conn.Closed() {
				continue
			}

			return ex.conn, nil
		default:
			// Nothing idle right now; dial a fresh connection.
			return c.newConn(ctx)
		}
	}
}
// NewStream implements drpc.Conn's NewStream method using a pooled connection. It
// waits for the stream to be finished before replacing the connection into the pool.
func (c *Conn) NewStream(ctx context.Context, rpc string) (_ drpc.Stream, err error) {
	defer mon.Task()(&ctx)(&err)

	conn, err := c.getConn(ctx)
	if err != nil {
		return nil, err
	}

	stream, err := conn.NewStream(ctx, rpc)
	if err != nil {
		// NOTE(review): on this path the connection is neither pooled nor
		// closed — verify it is not leaked.
		return nil, err
	}

	// the stream's done channel is closed when we're sure no reads/writes are
	// coming in for that stream anymore. it has been fully terminated.
	// Only then may the connection go back into the pool; the goroutine
	// lives exactly as long as the stream.
	go func() {
		<-stream.Context().Done()
		_ = c.Put(conn)
	}()

	return stream, nil
}
+ switch err { + case nil: + return OK + case context.Canceled: + return Canceled + case context.DeadlineExceeded: + return DeadlineExceeded + default: + if code := StatusCode(drpcerr.Code(err)); code != Unknown { + return code + } + + // If we have grpc attached try to get grpc code if possible. + if grpchook.HookedConvertToStatusCode != nil { + if code, ok := grpchook.HookedConvertToStatusCode(err); ok { + return StatusCode(code) + } + } + + return Unknown + } +} + +// Wrap wraps the error with the provided status code. +func Wrap(code StatusCode, err error) error { + if err == nil { + return nil + } + + // Should we also handle grpc error status codes. + if grpchook.HookedErrorWrap != nil { + return grpchook.HookedErrorWrap(grpchook.StatusCode(code), err) + } + + ce := &codeErr{ + code: code, + } + + if ee, ok := err.(errsError); ok { + ce.errsError = ee + } else { + ce.errsError = errs.Wrap(err).(errsError) + } + + return ce +} + +// Error wraps the message with a status code into an error. +func Error(code StatusCode, msg string) error { + return Wrap(code, errs.New("%s", msg)) +} + +// Errorf : Error :: fmt.Sprintf : fmt.Sprint +func Errorf(code StatusCode, format string, a ...interface{}) error { + return Wrap(code, errs.New(format, a...)) +} + +type errsError interface { + error + fmt.Formatter + Name() (string, bool) +} + +// codeErr implements error that can work both in grpc and drpc. +type codeErr struct { + errsError + code StatusCode +} + +func (c *codeErr) Unwrap() error { return c.errsError } +func (c *codeErr) Cause() error { return c.errsError } + +func (c *codeErr) Code() uint64 { return uint64(c.code) } diff --git a/vendor/storj.io/common/rpc/rpctracing/common.go b/vendor/storj.io/common/rpc/rpctracing/common.go new file mode 100644 index 000000000..d08179c6a --- /dev/null +++ b/vendor/storj.io/common/rpc/rpctracing/common.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +// Package rpctracing implements tracing for rpc. +package rpctracing + +import "github.com/spacemonkeygo/monkit/v3" + +const ( + // TraceID is the key we use to store trace id value into context. + TraceID = "trace-id" + // ParentID is the key we use to store parent's span id value into context. + ParentID = "parent-id" + // Sampled is the key we use to store sampled flag into context + Sampled = "sampled" +) + +var mon = monkit.Package() diff --git a/vendor/storj.io/common/rpc/rpctracing/handler.go b/vendor/storj.io/common/rpc/rpctracing/handler.go new file mode 100644 index 000000000..abf4479b2 --- /dev/null +++ b/vendor/storj.io/common/rpc/rpctracing/handler.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package rpctracing implements tracing for rpc. +package rpctracing + +import ( + "context" + + "github.com/spacemonkeygo/monkit/v3" + + "storj.io/drpc" + "storj.io/drpc/drpcmetadata" + "storj.io/drpc/drpcmux" +) + +type streamWrapper struct { + drpc.Stream + ctx context.Context +} + +func (s *streamWrapper) Context() context.Context { return s.ctx } + +type handlerFunc func(metadata map[string]string) (trace *monkit.Trace, spanID int64) + +func defaultHandlerFunc(metadata map[string]string) (*monkit.Trace, int64) { + return monkit.NewTrace(monkit.NewId()), monkit.NewId() +} + +// Handler implements drpc handler interface and takes in a callback function. +type Handler struct { + mux *drpcmux.Mux + cb handlerFunc +} + +// NewHandler returns a new instance of Handler. +func NewHandler(mux *drpcmux.Mux, cb handlerFunc) *Handler { + if cb == nil { + cb = defaultHandlerFunc + } + return &Handler{ + mux: mux, + cb: cb, + } +} + +// HandleRPC adds tracing metadata onto server stream. 
// HandleRPC adds tracing metadata onto server stream.
func (handler *Handler) HandleRPC(stream drpc.Stream, rpc string) (err error) {
	streamCtx := stream.Context()
	// Only set up a remote trace when the client attached tracing metadata;
	// otherwise the rpc is handled without starting a span here.
	metadata, ok := drpcmetadata.Get(streamCtx)
	if ok {
		trace, spanID := handler.cb(metadata)
		// RemoteTrace receives &streamCtx and may replace it, so the wrapper
		// below hands the updated context to the inner mux.
		defer mon.FuncNamed(rpc).RemoteTrace(&streamCtx, spanID, trace)(&err)
	}

	return handler.mux.HandleRPC(&streamWrapper{Stream: stream, ctx: streamCtx}, rpc)
}
+func (c *TracingWrapper) trace(ctx context.Context) context.Context { + span := monkit.SpanFromCtx(ctx) + if span == nil || span.Parent() == nil { + return ctx + } + + sampled, exist := span.Trace().Get(Sampled).(bool) + if !exist || !sampled { + return ctx + } + + data := map[string]string{ + TraceID: strconv.FormatInt(span.Trace().Id(), 10), + ParentID: strconv.FormatInt(span.Id(), 10), + Sampled: strconv.FormatBool(sampled), + } + + return drpcmetadata.AddPairs(ctx, data) +} diff --git a/vendor/storj.io/common/signing/doc.go b/vendor/storj.io/common/signing/doc.go new file mode 100644 index 000000000..a3d8f97f0 --- /dev/null +++ b/vendor/storj.io/common/signing/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package signing implements consistent signing and verifying protobuf messages. +package signing diff --git a/vendor/storj.io/common/signing/encode.go b/vendor/storj.io/common/signing/encode.go new file mode 100644 index 000000000..6b98af628 --- /dev/null +++ b/vendor/storj.io/common/signing/encode.go @@ -0,0 +1,120 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package signing + +import ( + "context" + + "storj.io/common/pb" +) + +// EncodeOrderLimit encodes order limit into bytes for signing. Removes signature from serialized limit. +func EncodeOrderLimit(ctx context.Context, limit *pb.OrderLimit) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + + // protobuf has problems with serializing types with nullable=false + // this uses a different message for signing, such that the rest of the code + // doesn't have to deal with pointers for those particular fields. 
+ + signing := pb.OrderLimitSigning{} + signing.SerialNumber = limit.SerialNumber + signing.SatelliteId = limit.SatelliteId + if limit.DeprecatedUplinkId != nil && !limit.DeprecatedUplinkId.IsZero() { + signing.DeprecatedUplinkId = limit.DeprecatedUplinkId + } + if !limit.UplinkPublicKey.IsZero() { + signing.UplinkPublicKey = &limit.UplinkPublicKey + } + signing.StorageNodeId = limit.StorageNodeId + signing.PieceId = limit.PieceId + signing.Limit = limit.Limit + signing.Action = limit.Action + if !limit.PieceExpiration.IsZero() { + signing.PieceExpiration = &limit.PieceExpiration + } + if !limit.OrderExpiration.IsZero() { + signing.OrderExpiration = &limit.OrderExpiration + } + if !limit.OrderCreation.IsZero() { + signing.OrderCreation = &limit.OrderCreation + } + signing.SatelliteAddress = limit.SatelliteAddress + + return pb.Marshal(&signing) +} + +// EncodeOrder encodes order into bytes for signing. Removes signature from serialized order. +func EncodeOrder(ctx context.Context, order *pb.Order) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + + // protobuf has problems with serializing types with nullable=false + // this uses a different message for signing, such that the rest of the code + // doesn't have to deal with pointers for those particular fields. + + signing := pb.OrderSigning{} + signing.SerialNumber = order.SerialNumber + signing.Amount = order.Amount + + return pb.Marshal(&signing) +} + +// EncodePieceHash encodes piece hash into bytes for signing. Removes signature from serialized hash. +func EncodePieceHash(ctx context.Context, hash *pb.PieceHash) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + + // protobuf has problems with serializing types with nullable=false + // this uses a different message for signing, such that the rest of the code + // doesn't have to deal with pointers for those particular fields. 
+ + signing := pb.PieceHashSigning{} + signing.PieceId = hash.PieceId + signing.Hash = hash.Hash + signing.PieceSize = hash.PieceSize + if !hash.Timestamp.IsZero() { + signing.Timestamp = &hash.Timestamp + } + return pb.Marshal(&signing) +} + +// EncodeStreamID encodes stream ID into bytes for signing. +func EncodeStreamID(ctx context.Context, streamID *pb.SatStreamID) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + signature := streamID.SatelliteSignature + streamID.SatelliteSignature = nil + out, err := pb.Marshal(streamID) + streamID.SatelliteSignature = signature + return out, err +} + +// EncodeSegmentID encodes segment ID into bytes for signing. +func EncodeSegmentID(ctx context.Context, segmentID *pb.SatSegmentID) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + signature := segmentID.SatelliteSignature + segmentID.SatelliteSignature = nil + out, err := pb.Marshal(segmentID) + segmentID.SatelliteSignature = signature + return out, err +} + +// EncodeExitCompleted encodes ExitCompleted into bytes for signing. +func EncodeExitCompleted(ctx context.Context, exitCompleted *pb.ExitCompleted) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + signature := exitCompleted.ExitCompleteSignature + exitCompleted.ExitCompleteSignature = nil + out, err := pb.Marshal(exitCompleted) + exitCompleted.ExitCompleteSignature = signature + + return out, err +} + +// EncodeExitFailed encodes ExitFailed into bytes for signing. 
+func EncodeExitFailed(ctx context.Context, exitFailed *pb.ExitFailed) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + signature := exitFailed.ExitFailureSignature + exitFailed.ExitFailureSignature = nil + out, err := pb.Marshal(exitFailed) + exitFailed.ExitFailureSignature = signature + + return out, err +} diff --git a/vendor/storj.io/common/signing/peers.go b/vendor/storj.io/common/signing/peers.go new file mode 100644 index 000000000..e723c7a2c --- /dev/null +++ b/vendor/storj.io/common/signing/peers.go @@ -0,0 +1,74 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package signing + +import ( + "context" + "crypto" + + "github.com/spacemonkeygo/monkit/v3" + + "storj.io/common/identity" + "storj.io/common/pkcrypto" + "storj.io/common/storj" +) + +var mon = monkit.Package() + +// PrivateKey implements a signer and signee using a crypto.PrivateKey. +type PrivateKey struct { + Self storj.NodeID + Key crypto.PrivateKey +} + +// SignerFromFullIdentity returns signer based on full identity. +func SignerFromFullIdentity(identity *identity.FullIdentity) Signer { + return &PrivateKey{ + Self: identity.ID, + Key: identity.Key, + } +} + +// ID returns node id associated with PrivateKey. +func (private *PrivateKey) ID() storj.NodeID { return private.Self } + +// HashAndSign hashes the data and signs with the used key. +func (private *PrivateKey) HashAndSign(ctx context.Context, data []byte) (_ []byte, err error) { + defer mon.Task()(&ctx)(&err) + return pkcrypto.HashAndSign(private.Key, data) +} + +// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PrivateKey. 
+func (private *PrivateKey) HashAndVerifySignature(ctx context.Context, data, signature []byte) (err error) { + defer mon.Task()(&ctx)(&err) + pub, err := pkcrypto.PublicKeyFromPrivate(private.Key) + if err != nil { + return err + } + + return pkcrypto.HashAndVerifySignature(pub, data, signature) +} + +// PublicKey implements a signee using crypto.PublicKey. +type PublicKey struct { + Self storj.NodeID + Key crypto.PublicKey +} + +// SigneeFromPeerIdentity returns signee based on peer identity. +func SigneeFromPeerIdentity(identity *identity.PeerIdentity) Signee { + return &PublicKey{ + Self: identity.ID, + Key: identity.Leaf.PublicKey, + } +} + +// ID returns node id associated with this PublicKey. +func (public *PublicKey) ID() storj.NodeID { return public.Self } + +// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PublicKey. +func (public *PublicKey) HashAndVerifySignature(ctx context.Context, data, signature []byte) (err error) { + defer mon.Task()(&ctx)(&err) + return pkcrypto.HashAndVerifySignature(public.Key, data, signature) +} diff --git a/vendor/storj.io/common/signing/sign.go b/vendor/storj.io/common/signing/sign.go new file mode 100644 index 000000000..40a64f353 --- /dev/null +++ b/vendor/storj.io/common/signing/sign.go @@ -0,0 +1,165 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package signing + +import ( + "context" + + "github.com/zeebo/errs" + + "storj.io/common/pb" + "storj.io/common/storj" +) + +// Error is the default error class for signing package. +var Error = errs.Class("signing") + +// Signer is able to sign data and verify own signature belongs. +type Signer interface { + ID() storj.NodeID + HashAndSign(ctx context.Context, data []byte) ([]byte, error) + HashAndVerifySignature(ctx context.Context, data, signature []byte) error +} + +// SignOrderLimit signs the order limit using the specified signer. +// Signer is a satellite. 
+func SignOrderLimit(ctx context.Context, satellite Signer, unsigned *pb.OrderLimit) (_ *pb.OrderLimit, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeOrderLimit(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.SatelliteSignature, err = satellite.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} + +// SignUplinkOrder signs the order using the specified signer. +// Signer is an uplink. +func SignUplinkOrder(ctx context.Context, privateKey storj.PiecePrivateKey, unsigned *pb.Order) (_ *pb.Order, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeOrder(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.UplinkSignature, err = privateKey.Sign(bytes) + if err != nil { + return nil, Error.Wrap(err) + } + return &signed, nil +} + +// SignPieceHash signs the piece hash using the specified signer. +// Signer is either uplink or storage node. +func SignPieceHash(ctx context.Context, signer Signer, unsigned *pb.PieceHash) (_ *pb.PieceHash, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodePieceHash(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.Signature, err = signer.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} + +// SignUplinkPieceHash signs the piece hash using the specified signer. +// Signer is either uplink or storage node. 
+func SignUplinkPieceHash(ctx context.Context, privateKey storj.PiecePrivateKey, unsigned *pb.PieceHash) (_ *pb.PieceHash, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodePieceHash(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.Signature, err = privateKey.Sign(bytes) + if err != nil { + return nil, Error.Wrap(err) + } + return &signed, nil +} + +// SignStreamID signs the stream ID using the specified signer +// Signer is a satellite +func SignStreamID(ctx context.Context, signer Signer, unsigned *pb.SatStreamID) (_ *pb.SatStreamID, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeStreamID(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.SatelliteSignature, err = signer.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} + +// SignSegmentID signs the segment ID using the specified signer +// Signer is a satellite +func SignSegmentID(ctx context.Context, signer Signer, unsigned *pb.SatSegmentID) (_ *pb.SatSegmentID, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeSegmentID(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.SatelliteSignature, err = signer.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} + +// SignExitCompleted signs the ExitCompleted using the specified signer +// Signer is a satellite +func SignExitCompleted(ctx context.Context, signer Signer, unsigned *pb.ExitCompleted) (_ *pb.ExitCompleted, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeExitCompleted(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.ExitCompleteSignature, err = signer.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} + +// SignExitFailed signs the 
ExitFailed using the specified signer +// Signer is a satellite +func SignExitFailed(ctx context.Context, signer Signer, unsigned *pb.ExitFailed) (_ *pb.ExitFailed, err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeExitFailed(ctx, unsigned) + if err != nil { + return nil, Error.Wrap(err) + } + + signed := *unsigned + signed.ExitFailureSignature, err = signer.HashAndSign(ctx, bytes) + if err != nil { + return nil, Error.Wrap(err) + } + + return &signed, nil +} diff --git a/vendor/storj.io/common/signing/verify.go b/vendor/storj.io/common/signing/verify.go new file mode 100644 index 000000000..086d0860a --- /dev/null +++ b/vendor/storj.io/common/signing/verify.go @@ -0,0 +1,117 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package signing + +import ( + "context" + + "storj.io/common/pb" + "storj.io/common/storj" +) + +// Signee is able to verify that the data signature belongs to the signee. +type Signee interface { + ID() storj.NodeID + HashAndVerifySignature(ctx context.Context, data, signature []byte) error +} + +// VerifyOrderLimitSignature verifies that the signature inside order limit is valid and belongs to the satellite. +func VerifyOrderLimitSignature(ctx context.Context, satellite Signee, signed *pb.OrderLimit) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeOrderLimit(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) +} + +// VerifyOrderSignature verifies that the signature inside order is valid and belongs to the uplink. 
+func VerifyOrderSignature(ctx context.Context, uplink Signee, signed *pb.Order) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeOrder(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return uplink.HashAndVerifySignature(ctx, bytes, signed.UplinkSignature) +} + +// VerifyUplinkOrderSignature verifies that the signature inside order is valid and belongs to the uplink. +func VerifyUplinkOrderSignature(ctx context.Context, publicKey storj.PiecePublicKey, signed *pb.Order) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeOrder(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return Error.Wrap(publicKey.Verify(bytes, signed.UplinkSignature)) +} + +// VerifyPieceHashSignature verifies that the signature inside piece hash is valid and belongs to the signer, which is either uplink or storage node. +func VerifyPieceHashSignature(ctx context.Context, signee Signee, signed *pb.PieceHash) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodePieceHash(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return signee.HashAndVerifySignature(ctx, bytes, signed.Signature) +} + +// VerifyUplinkPieceHashSignature verifies that the signature inside piece hash is valid and belongs to the signer, which is either uplink or storage node. 
+func VerifyUplinkPieceHashSignature(ctx context.Context, publicKey storj.PiecePublicKey, signed *pb.PieceHash) (err error) { + defer mon.Task()(&ctx)(&err) + + bytes, err := EncodePieceHash(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return Error.Wrap(publicKey.Verify(bytes, signed.Signature)) +} + +// VerifyStreamID verifies that the signature inside stream ID belongs to the satellite +func VerifyStreamID(ctx context.Context, satellite Signee, signed *pb.SatStreamID) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeStreamID(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) +} + +// VerifySegmentID verifies that the signature inside segment ID belongs to the satellite +func VerifySegmentID(ctx context.Context, satellite Signee, signed *pb.SatSegmentID) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeSegmentID(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return satellite.HashAndVerifySignature(ctx, bytes, signed.SatelliteSignature) +} + +// VerifyExitCompleted verifies that the signature inside ExitCompleted belongs to the satellite +func VerifyExitCompleted(ctx context.Context, satellite Signee, signed *pb.ExitCompleted) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeExitCompleted(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return Error.Wrap(satellite.HashAndVerifySignature(ctx, bytes, signed.ExitCompleteSignature)) +} + +// VerifyExitFailed verifies that the signature inside ExitFailed belongs to the satellite +func VerifyExitFailed(ctx context.Context, satellite Signee, signed *pb.ExitFailed) (err error) { + defer mon.Task()(&ctx)(&err) + bytes, err := EncodeExitFailed(ctx, signed) + if err != nil { + return Error.Wrap(err) + } + + return Error.Wrap(satellite.HashAndVerifySignature(ctx, bytes, signed.ExitFailureSignature)) +} diff --git 
a/vendor/storj.io/common/storj/bucket.go b/vendor/storj.io/common/storj/bucket.go new file mode 100644 index 000000000..81208eec9 --- /dev/null +++ b/vendor/storj.io/common/storj/bucket.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "time" + + "github.com/zeebo/errs" + + "storj.io/common/uuid" +) + +var ( + // ErrBucket is an error class for general bucket errors + ErrBucket = errs.Class("bucket") + + // ErrNoBucket is an error class for using empty bucket name + ErrNoBucket = errs.Class("no bucket specified") + + // ErrBucketNotFound is an error class for non-existing bucket + ErrBucketNotFound = errs.Class("bucket not found") +) + +// Bucket contains information about a specific bucket +type Bucket struct { + ID uuid.UUID + Name string + ProjectID uuid.UUID + PartnerID uuid.UUID + Created time.Time + PathCipher CipherSuite + DefaultSegmentsSize int64 + DefaultRedundancyScheme RedundancyScheme + DefaultEncryptionParameters EncryptionParameters +} diff --git a/vendor/storj.io/common/storj/doc.go b/vendor/storj.io/common/storj/doc.go new file mode 100644 index 000000000..912cb7940 --- /dev/null +++ b/vendor/storj.io/common/storj/doc.go @@ -0,0 +1,7 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +/*Package storj contains the types which represent the main entities of the +Storj domain. +*/ +package storj diff --git a/vendor/storj.io/common/storj/encryption.go b/vendor/storj.io/common/storj/encryption.go new file mode 100644 index 000000000..0e295f90c --- /dev/null +++ b/vendor/storj.io/common/storj/encryption.go @@ -0,0 +1,163 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +import ( + "encoding/base32" + + "github.com/zeebo/errs" +) + +// EncryptionParameters is the cipher suite and parameters used for encryption +type EncryptionParameters struct { + // CipherSuite specifies the cipher suite to be used for encryption. + CipherSuite CipherSuite + // BlockSize determines the unit size at which encryption is performed. + // It is important to distinguish this from the block size used by the + // cipher suite (probably 128 bits). There is some small overhead for + // each encryption unit, so BlockSize should not be too small, but + // smaller sizes yield shorter first-byte latency and better seek times. + // Note that BlockSize itself is the size of data blocks _after_ they + // have been encrypted and the authentication overhead has been added. + // It is _not_ the size of the data blocks to _be_ encrypted. + BlockSize int32 +} + +// IsZero returns true if no field in the struct is set to non-zero value +func (params EncryptionParameters) IsZero() bool { + return params == (EncryptionParameters{}) +} + +// CipherSuite specifies one of the encryption suites supported by Storj +// libraries for encryption of in-network data. +type CipherSuite byte + +const ( + // EncUnspecified indicates no encryption suite has been selected. + EncUnspecified = CipherSuite(iota) + // EncNull indicates use of the NULL cipher; that is, no encryption is + // done. The ciphertext is equal to the plaintext. + EncNull + // EncAESGCM indicates use of AES128-GCM encryption. + EncAESGCM + // EncSecretBox indicates use of XSalsa20-Poly1305 encryption, as provided + // by the NaCl cryptography library under the name "Secretbox". + EncSecretBox + // EncNullBase64URL is like EncNull but Base64 encodes/decodes the + // binary path data (URL-safe) + EncNullBase64URL +) + +// Constant definitions for key and nonce sizes +const ( + KeySize = 32 + NonceSize = 24 +) + +// NewKey creates a new Storj key from humanReadableKey. 
+func NewKey(humanReadableKey []byte) (*Key, error) { + var key Key + + // Because of backward compatibility the key is filled with 0 or truncated if + // humanReadableKey isn't of the same size that KeySize. + // See https://github.com/storj/storj/pull/1967#discussion_r285544849 + copy(key[:], humanReadableKey) + return &key, nil +} + +// Key represents the largest key used by any encryption protocol +type Key [KeySize]byte + +// Raw returns the key as a raw byte array pointer +func (key *Key) Raw() *[KeySize]byte { + return (*[KeySize]byte)(key) +} + +// IsZero returns true if key is nil or it points to its zero value +func (key *Key) IsZero() bool { + return key == nil || *key == (Key{}) +} + +// ErrNonce is used when something goes wrong with a stream ID +var ErrNonce = errs.Class("nonce error") + +// nonceEncoding is base32 without padding +var nonceEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// Nonce represents the largest nonce used by any encryption protocol +type Nonce [NonceSize]byte + +// NonceFromString decodes an base32 encoded +func NonceFromString(s string) (Nonce, error) { + nonceBytes, err := nonceEncoding.DecodeString(s) + if err != nil { + return Nonce{}, ErrNonce.Wrap(err) + } + return NonceFromBytes(nonceBytes) +} + +// NonceFromBytes converts a byte slice into a nonce +func NonceFromBytes(b []byte) (Nonce, error) { + if len(b) != len(Nonce{}) { + return Nonce{}, ErrNonce.New("not enough bytes to make a nonce; have %d, need %d", len(b), len(NodeID{})) + } + + var nonce Nonce + copy(nonce[:], b) + return nonce, nil +} + +// IsZero returns whether nonce is unassigned +func (nonce Nonce) IsZero() bool { + return nonce == Nonce{} +} + +// String representation of the nonce +func (nonce Nonce) String() string { return nonceEncoding.EncodeToString(nonce.Bytes()) } + +// Bytes returns bytes of the nonce +func (nonce Nonce) Bytes() []byte { return nonce[:] } + +// Raw returns the nonce as a raw byte array pointer +func (nonce *Nonce) 
Raw() *[NonceSize]byte { + return (*[NonceSize]byte)(nonce) +} + +// Marshal serializes a nonce +func (nonce Nonce) Marshal() ([]byte, error) { + return nonce.Bytes(), nil +} + +// MarshalTo serializes a nonce into the passed byte slice +func (nonce *Nonce) MarshalTo(data []byte) (n int, err error) { + n = copy(data, nonce.Bytes()) + return n, nil +} + +// Unmarshal deserializes a nonce +func (nonce *Nonce) Unmarshal(data []byte) error { + var err error + *nonce, err = NonceFromBytes(data) + return err +} + +// Size returns the length of a nonce (implements gogo's custom type interface) +func (nonce Nonce) Size() int { + return len(nonce) +} + +// MarshalJSON serializes a nonce to a json string as bytes +func (nonce Nonce) MarshalJSON() ([]byte, error) { + return []byte(`"` + nonce.String() + `"`), nil +} + +// UnmarshalJSON deserializes a json string (as bytes) to a nonce +func (nonce *Nonce) UnmarshalJSON(data []byte) error { + var err error + *nonce, err = NonceFromString(string(data)) + return err +} + +// EncryptedPrivateKey is a private key that has been encrypted +type EncryptedPrivateKey []byte diff --git a/vendor/storj.io/common/storj/identity_version.go b/vendor/storj.io/common/storj/identity_version.go new file mode 100644 index 000000000..1fe28316c --- /dev/null +++ b/vendor/storj.io/common/storj/identity_version.go @@ -0,0 +1,140 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "crypto" + "crypto/x509" + "crypto/x509/pkix" + "strconv" + "strings" + + "storj.io/common/peertls/extensions" + "storj.io/common/pkcrypto" +) + +const ( + // V0 represents identity version 0 + // NB: identities created before identity versioning (i.e. which don't have a + // version extension; "legacy") will be recognized as V0. 
+ V0 = IDVersionNumber(iota) +) + +var ( + // IDVersions is a map of all identity versions + IDVersions = map[IDVersionNumber]IDVersion{ + V0: { + Number: V0, + NewPrivateKey: pkcrypto.GeneratePrivateKey, + }, + } + + // IDVersionHandler compares the identity version of the remote peers + // certificate chain to the extension options passed to the factory. + IDVersionHandler = extensions.NewHandlerFactory( + &extensions.IdentityVersionExtID, idVersionHandler, + ) +) + +// IDVersionNumber is the number of an identity version. +type IDVersionNumber uint8 + +// IDVersion holds fields that are used to distinguish different identity +// versions from one another; used in identity generation. +type IDVersion struct { + Number IDVersionNumber + NewPrivateKey func() (crypto.PrivateKey, error) +} + +func init() { + extensions.DefaultHandlers.Register(IDVersionHandler) +} + +// GetIDVersion looks up the given version number in the map of registered +// versions, returning an error if none is found. +func GetIDVersion(number IDVersionNumber) (IDVersion, error) { + version, ok := IDVersions[number] + if !ok { + return IDVersion{}, ErrVersion.New("unknown version") + } + + return version, nil +} + +// LatestIDVersion returns the last IDVersion registered. +func LatestIDVersion() IDVersion { + return IDVersions[IDVersionNumber(len(IDVersions)-1)] +} + +// IDVersionFromCert parsed the IDVersion from the passed certificate's IDVersion extension. +func IDVersionFromCert(cert *x509.Certificate) (IDVersion, error) { + for _, ext := range cert.Extensions { + if extensions.IdentityVersionExtID.Equal(ext.Id) { + return GetIDVersion(IDVersionNumber(ext.Value[0])) + } + } + + // NB: for backward-compatibility with V0 certificate generation, V0 is used + // when no version extension exists. + // TODO(beta maybe?): Error here instead; we should drop support for + // certificates without a version extension. 
+ // + // return IDVersion{}, ErrVersion.New("certificate doesn't contain an identity version extension") + return IDVersions[V0], nil +} + +// IDVersionInVersions returns an error if the given version is in the given string of version(s)/range(s). +func IDVersionInVersions(versionNumber IDVersionNumber, versionsStr string) error { + switch versionsStr { + case "": + return ErrVersion.New("no allowed peer identity versions specified") + case "*": + return nil + case "latest": + if versionNumber == LatestIDVersion().Number { + return nil + } + default: + versionRanges := strings.Split(versionsStr, ",") + for _, versionRange := range versionRanges { + if strings.Contains(versionRange, "-") { + versionLimits := strings.Split(versionRange, "-") + if len(versionLimits) != 2 { + return ErrVersion.New("malformed PeerIDVersions string: %s", versionsStr) + } + + begin, err := strconv.Atoi(versionLimits[0]) + if err != nil { + return ErrVersion.Wrap(err) + } + + end, err := strconv.Atoi(versionLimits[1]) + if err != nil { + return ErrVersion.Wrap(err) + } + + for i := begin; i <= end; i++ { + if versionNumber == IDVersionNumber(i) { + return nil + } + } + } else { + versionInt, err := strconv.Atoi(versionRange) + if err != nil { + return ErrVersion.Wrap(err) + } + if versionNumber == IDVersionNumber(versionInt) { + return nil + } + } + } + } + return ErrVersion.New("version %d not in versions %s", versionNumber, versionsStr) +} + +func idVersionHandler(opts *extensions.Options) extensions.HandlerFunc { + return func(ext pkix.Extension, chain [][]*x509.Certificate) error { + return IDVersionInVersions(IDVersionNumber(ext.Value[0]), opts.PeerIDVersions) + } +} diff --git a/vendor/storj.io/common/storj/metainfo.go b/vendor/storj.io/common/storj/metainfo.go new file mode 100644 index 000000000..75d6fdec8 --- /dev/null +++ b/vendor/storj.io/common/storj/metainfo.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +// ListDirection specifies listing direction +type ListDirection int8 + +const ( + // Before lists backwards from cursor, without cursor [NOT SUPPORTED] + Before = ListDirection(-2) + // Backward lists backwards from cursor, including cursor [NOT SUPPORTED] + Backward = ListDirection(-1) + // Forward lists forwards from cursor, including cursor + Forward = ListDirection(1) + // After lists forwards from cursor, without cursor + After = ListDirection(2) +) + +// ListOptions lists objects +type ListOptions struct { + Prefix Path + Cursor Path // Cursor is relative to Prefix, full path is Prefix + Cursor + Delimiter rune + Recursive bool + Direction ListDirection + Limit int +} + +// ObjectList is a list of objects +type ObjectList struct { + Bucket string + Prefix Path + More bool + + // Items paths are relative to Prefix + // To get the full path use list.Prefix + list.Items[0].Path + Items []Object +} + +// NextPage returns options for listing the next page +func (opts ListOptions) NextPage(list ObjectList) ListOptions { + if !list.More || len(list.Items) == 0 { + return ListOptions{} + } + + return ListOptions{ + Prefix: opts.Prefix, + Cursor: list.Items[len(list.Items)-1].Path, + Direction: After, + Limit: opts.Limit, + } +} + +// BucketListOptions lists objects +type BucketListOptions struct { + Cursor string + Direction ListDirection + Limit int +} + +// BucketList is a list of buckets +type BucketList struct { + More bool + Items []Bucket +} + +// NextPage returns options for listing the next page +func (opts BucketListOptions) NextPage(list BucketList) BucketListOptions { + if !list.More || len(list.Items) == 0 { + return BucketListOptions{} + } + + return BucketListOptions{ + Cursor: list.Items[len(list.Items)-1].Name, + Direction: After, + Limit: opts.Limit, + } +} diff --git a/vendor/storj.io/common/storj/node.go b/vendor/storj.io/common/storj/node.go new file mode 100644 index 000000000..5c05366a2 --- /dev/null +++ 
b/vendor/storj.io/common/storj/node.go @@ -0,0 +1,260 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "crypto/sha256" + "crypto/x509/pkix" + "database/sql/driver" + "encoding/json" + "math/bits" + + "github.com/btcsuite/btcutil/base58" + "github.com/zeebo/errs" + + "storj.io/common/peertls/extensions" +) + +var ( + // ErrNodeID is used when something goes wrong with a node id. + ErrNodeID = errs.Class("node ID error") + // ErrVersion is used for identity version related errors. + ErrVersion = errs.Class("node ID version error") +) + +// NodeIDSize is the byte length of a NodeID +const NodeIDSize = sha256.Size + +// NodeID is a unique node identifier +type NodeID [NodeIDSize]byte + +// NodeIDList is a slice of NodeIDs (implements sort) +type NodeIDList []NodeID + +// NewVersionedID adds an identity version to a node ID. +func NewVersionedID(id NodeID, version IDVersion) NodeID { + var versionedID NodeID + copy(versionedID[:], id[:]) + + versionedID[NodeIDSize-1] = byte(version.Number) + return versionedID +} + +// NewVersionExt creates a new identity version certificate extension for the +// given identity version, +func NewVersionExt(version IDVersion) pkix.Extension { + return pkix.Extension{ + Id: extensions.IdentityVersionExtID, + Value: []byte{byte(version.Number)}, + } +} + +// NodeIDFromString decodes a base58check encoded node id string +func NodeIDFromString(s string) (NodeID, error) { + idBytes, versionNumber, err := base58.CheckDecode(s) + if err != nil { + return NodeID{}, ErrNodeID.Wrap(err) + } + unversionedID, err := NodeIDFromBytes(idBytes) + if err != nil { + return NodeID{}, err + } + + version := IDVersions[IDVersionNumber(versionNumber)] + return NewVersionedID(unversionedID, version), nil +} + +// NodeIDsFromBytes converts a 2d byte slice into a list of nodes +func NodeIDsFromBytes(b [][]byte) (ids NodeIDList, err error) { + var idErrs []error + for _, idBytes := range b { + id, 
err := NodeIDFromBytes(idBytes) + if err != nil { + idErrs = append(idErrs, err) + continue + } + + ids = append(ids, id) + } + + if err = errs.Combine(idErrs...); err != nil { + return nil, err + } + return ids, nil +} + +// NodeIDFromBytes converts a byte slice into a node id +func NodeIDFromBytes(b []byte) (NodeID, error) { + bLen := len(b) + if bLen != len(NodeID{}) { + return NodeID{}, ErrNodeID.New("not enough bytes to make a node id; have %d, need %d", bLen, len(NodeID{})) + } + + var id NodeID + copy(id[:], b) + return id, nil +} + +// String returns NodeID as base58 encoded string with checksum and version bytes +func (id NodeID) String() string { + unversionedID := id.unversioned() + return base58.CheckEncode(unversionedID[:], byte(id.Version().Number)) +} + +// IsZero returns whether NodeID is unassigned +func (id NodeID) IsZero() bool { + return id == NodeID{} +} + +// Bytes returns raw bytes of the id +func (id NodeID) Bytes() []byte { return id[:] } + +// Less returns whether id is smaller than other in lexicographic order. +func (id NodeID) Less(other NodeID) bool { + for k, v := range id { + if v < other[k] { + return true + } else if v > other[k] { + return false + } + } + return false +} + +// Version returns the version of the identity format +func (id NodeID) Version() IDVersion { + versionNumber := id.versionByte() + if versionNumber == 0 { + return IDVersions[V0] + } + + version, err := GetIDVersion(IDVersionNumber(versionNumber)) + // NB: when in doubt, use V0 + if err != nil { + return IDVersions[V0] + } + + return version +} + +// Difficulty returns the number of trailing zero bits in a node ID +func (id NodeID) Difficulty() (uint16, error) { + idLen := len(id) + var b byte + var zeroBits int + // NB: last difficulty byte is used for version + for i := 2; i <= idLen; i++ { + b = id[idLen-i] + + if b != 0 { + zeroBits = bits.TrailingZeros16(uint16(b)) + if zeroBits == 16 { + // we already checked that b != 0. 
+ return 0, ErrNodeID.New("impossible codepath!") + } + + return uint16((i-1)*8 + zeroBits), nil + } + } + + return 0, ErrNodeID.New("difficulty matches id hash length: %d; hash (hex): % x", idLen, id) +} + +// Marshal serializes a node id +func (id NodeID) Marshal() ([]byte, error) { + return id.Bytes(), nil +} + +// MarshalTo serializes a node ID into the passed byte slice +func (id *NodeID) MarshalTo(data []byte) (n int, err error) { + n = copy(data, id.Bytes()) + return n, nil +} + +// Unmarshal deserializes a node ID +func (id *NodeID) Unmarshal(data []byte) error { + var err error + *id, err = NodeIDFromBytes(data) + return err +} + +func (id NodeID) versionByte() byte { + return id[NodeIDSize-1] +} + +// unversioned returns the node ID with the version byte replaced with `0`. +// NB: Legacy node IDs (i.e. pre-identity-versions) with a difficulty less +// than `8` are unsupported. +func (id NodeID) unversioned() NodeID { + unversionedID := NodeID{} + copy(unversionedID[:], id[:NodeIDSize-1]) + return unversionedID +} + +// Size returns the length of a node ID (implements gogo's custom type interface) +func (id *NodeID) Size() int { + return len(id) +} + +// MarshalJSON serializes a node ID to a json string as bytes +func (id NodeID) MarshalJSON() ([]byte, error) { + return []byte(`"` + id.String() + `"`), nil +} + +// Value converts a NodeID to a database field +func (id NodeID) Value() (driver.Value, error) { + return id.Bytes(), nil +} + +// Scan extracts a NodeID from a database field +func (id *NodeID) Scan(src interface{}) (err error) { + b, ok := src.([]byte) + if !ok { + return ErrNodeID.New("NodeID Scan expects []byte") + } + n, err := NodeIDFromBytes(b) + *id = n + return err +} + +// UnmarshalJSON deserializes a json string (as bytes) to a node ID +func (id *NodeID) UnmarshalJSON(data []byte) error { + var unquoted string + err := json.Unmarshal(data, &unquoted) + if err != nil { + return err + } + + *id, err = NodeIDFromString(unquoted) + if err != 
nil { + return err + } + return nil +} + +// Strings returns a string slice of the node IDs +func (n NodeIDList) Strings() []string { + var strings []string + for _, nid := range n { + strings = append(strings, nid.String()) + } + return strings +} + +// Bytes returns a 2d byte slice of the node IDs +func (n NodeIDList) Bytes() (idsBytes [][]byte) { + for _, nid := range n { + idsBytes = append(idsBytes, nid.Bytes()) + } + return idsBytes +} + +// Len implements sort.Interface.Len() +func (n NodeIDList) Len() int { return len(n) } + +// Swap implements sort.Interface.Swap() +func (n NodeIDList) Swap(i, j int) { n[i], n[j] = n[j], n[i] } + +// Less implements sort.Interface.Less() +func (n NodeIDList) Less(i, j int) bool { return n[i].Less(n[j]) } diff --git a/vendor/storj.io/common/storj/nodeurl.go b/vendor/storj.io/common/storj/nodeurl.go new file mode 100644 index 000000000..0074fef1f --- /dev/null +++ b/vendor/storj.io/common/storj/nodeurl.go @@ -0,0 +1,137 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "net/url" + "strings" + + "github.com/zeebo/errs" +) + +var ( + // ErrNodeURL is used when something goes wrong with a node url. + ErrNodeURL = errs.Class("node URL error") +) + +// NodeURL defines a structure for connecting to a node. +type NodeURL struct { + ID NodeID + Address string +} + +// ParseNodeURL parses node URL string. 
+// +// Examples: +// +// raw IP: +// 33.20.0.1:7777 +// [2001:db8:1f70::999:de8:7648:6e8]:7777 +// +// with NodeID: +// 12vha9oTFnerxYRgeQ2BZqoFrLrnmmf5UWTCY2jA77dF3YvWew7@33.20.0.1:7777 +// 12vha9oTFnerxYRgeQ2BZqoFrLrnmmf5UWTCY2jA77dF3YvWew7@[2001:db8:1f70::999:de8:7648:6e8]:7777 +// +// without host: +// 12vha9oTFnerxYRgeQ2BZqoFrLrnmmf5UWTCY2jA77dF3YvWew7@ +func ParseNodeURL(s string) (NodeURL, error) { + if s == "" { + return NodeURL{}, nil + } + if !strings.HasPrefix(s, "storj://") { + if !strings.Contains(s, "://") { + s = "storj://" + s + } + } + + u, err := url.Parse(s) + if err != nil { + return NodeURL{}, ErrNodeURL.Wrap(err) + } + if u.Scheme != "" && u.Scheme != "storj" { + return NodeURL{}, ErrNodeURL.New("unknown scheme %q", u.Scheme) + } + + var node NodeURL + if u.User != nil { + node.ID, err = NodeIDFromString(u.User.String()) + if err != nil { + return NodeURL{}, ErrNodeURL.Wrap(err) + } + } + node.Address = u.Host + + return node, nil +} + +// IsZero returns whether the url is empty. +func (url NodeURL) IsZero() bool { + return url == NodeURL{} +} + +// String converts NodeURL to a string +func (url NodeURL) String() string { + if url.ID.IsZero() { + return url.Address + } + return url.ID.String() + "@" + url.Address +} + +// Set implements flag.Value interface +func (url *NodeURL) Set(s string) error { + parsed, err := ParseNodeURL(s) + if err != nil { + return ErrNodeURL.Wrap(err) + } + + *url = parsed + return nil +} + +// Type implements pflag.Value +func (NodeURL) Type() string { return "storj.NodeURL" } + +// NodeURLs defines a comma delimited flag for defining a list node url-s. 
+type NodeURLs []NodeURL + +// ParseNodeURLs parses comma delimited list of node urls +func ParseNodeURLs(s string) (NodeURLs, error) { + var urls NodeURLs + if s == "" { + return nil, nil + } + + for _, s := range strings.Split(s, ",") { + u, err := ParseNodeURL(s) + if err != nil { + return nil, ErrNodeURL.Wrap(err) + } + urls = append(urls, u) + } + + return urls, nil +} + +// String converts NodeURLs to a string +func (urls NodeURLs) String() string { + var xs []string + for _, u := range urls { + xs = append(xs, u.String()) + } + return strings.Join(xs, ",") +} + +// Set implements flag.Value interface +func (urls *NodeURLs) Set(s string) error { + parsed, err := ParseNodeURLs(s) + if err != nil { + return ErrNodeURL.Wrap(err) + } + + *urls = parsed + return nil +} + +// Type implements pflag.Value +func (NodeURLs) Type() string { return "storj.NodeURLs" } diff --git a/vendor/storj.io/common/storj/object.go b/vendor/storj.io/common/storj/object.go new file mode 100644 index 000000000..b0cc2d5e7 --- /dev/null +++ b/vendor/storj.io/common/storj/object.go @@ -0,0 +1,108 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +import ( + "time" + + "github.com/zeebo/errs" +) + +var ( + // ErrNoPath is an error class for using empty path + ErrNoPath = errs.Class("no path specified") + + // ErrObjectNotFound is an error class for non-existing object + ErrObjectNotFound = errs.Class("object not found") +) + +// Object contains information about a specific object +type Object struct { + Version uint32 + Bucket Bucket + Path Path + IsPrefix bool + + Metadata map[string]string + + ContentType string + Created time.Time + Modified time.Time + Expires time.Time + + Stream +} + +// ObjectInfo contains information about a specific object +type ObjectInfo struct { + Version uint32 + Bucket string + Path Path + IsPrefix bool + + StreamID StreamID + + Metadata []byte + + ContentType string + Created time.Time + Modified time.Time + Expires time.Time + + Stream +} + +// Stream is information about an object stream +type Stream struct { + ID StreamID + + // Size is the total size of the stream in bytes + Size int64 + // Checksum is the checksum of the segment checksums + Checksum []byte + + // SegmentCount is the number of segments + SegmentCount int64 + // FixedSegmentSize is the size of each segment, + // when all segments have the same size. It is -1 otherwise. 
+ FixedSegmentSize int64 + + // RedundancyScheme specifies redundancy strategy used for this stream + RedundancyScheme + // EncryptionParameters specifies encryption strategy used for this stream + EncryptionParameters + + LastSegment LastSegment // TODO: remove +} + +// LastSegment contains info about last segment +// TODO: remove +type LastSegment struct { + Size int64 + EncryptedKeyNonce Nonce + EncryptedKey EncryptedPrivateKey +} + +// Segment is full segment information +type Segment struct { + Index int64 + // Size is the size of the content in bytes + Size int64 + // Checksum is the checksum of the content + Checksum []byte + // Local data + Inline []byte + // Remote data + PieceID PieceID + Pieces []Piece + // Encryption + EncryptedKeyNonce Nonce + EncryptedKey EncryptedPrivateKey +} + +// Piece is information where a piece is located +type Piece struct { + Number byte + Location NodeID +} diff --git a/vendor/storj.io/common/storj/object_list_item.go b/vendor/storj.io/common/storj/object_list_item.go new file mode 100644 index 000000000..c6d051d8c --- /dev/null +++ b/vendor/storj.io/common/storj/object_list_item.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "time" +) + +// ObjectListItem represents listed object +type ObjectListItem struct { + EncryptedPath []byte + Version int32 + Status int32 + CreatedAt time.Time + StatusAt time.Time + ExpiresAt time.Time + EncryptedMetadataNonce Nonce + EncryptedMetadata []byte + IsPrefix bool +} diff --git a/vendor/storj.io/common/storj/path.go b/vendor/storj.io/common/storj/path.go new file mode 100644 index 000000000..0b44174e1 --- /dev/null +++ b/vendor/storj.io/common/storj/path.go @@ -0,0 +1,21 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +import ( + "strings" +) + +// Path represents a object path +type Path = string + +// SplitPath splits path into a slice of path components +func SplitPath(path Path) []string { + return strings.Split(path, "/") +} + +// JoinPaths concatenates paths to a new single path +func JoinPaths(paths ...Path) Path { + return strings.Join(paths, "/") +} diff --git a/vendor/storj.io/common/storj/pieceid.go b/vendor/storj.io/common/storj/pieceid.go new file mode 100644 index 000000000..bee444757 --- /dev/null +++ b/vendor/storj.io/common/storj/pieceid.go @@ -0,0 +1,140 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "crypto/hmac" + "crypto/rand" + "crypto/sha512" + "database/sql/driver" + "encoding/base32" + "encoding/binary" + "encoding/json" + + "github.com/zeebo/errs" +) + +// ErrPieceID is used when something goes wrong with a piece ID +var ErrPieceID = errs.Class("piece ID error") + +// pieceIDEncoding is base32 without padding +var pieceIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// PieceID is the unique identifier for pieces +type PieceID [32]byte + +// NewPieceID creates a piece ID +func NewPieceID() PieceID { + var id PieceID + + _, err := rand.Read(id[:]) + if err != nil { + panic(err) + } + + return id +} + +// PieceIDFromString decodes a hex encoded piece ID string +func PieceIDFromString(s string) (PieceID, error) { + idBytes, err := pieceIDEncoding.DecodeString(s) + if err != nil { + return PieceID{}, ErrPieceID.Wrap(err) + } + return PieceIDFromBytes(idBytes) +} + +// PieceIDFromBytes converts a byte slice into a piece ID +func PieceIDFromBytes(b []byte) (PieceID, error) { + if len(b) != len(PieceID{}) { + return PieceID{}, ErrPieceID.New("not enough bytes to make a piece ID; have %d, need %d", len(b), len(PieceID{})) + } + + var id PieceID + copy(id[:], b) + return id, nil +} + +// IsZero returns whether piece ID is unassigned +func (id PieceID) IsZero() 
bool { + return id == PieceID{} +} + +// String representation of the piece ID +func (id PieceID) String() string { return pieceIDEncoding.EncodeToString(id.Bytes()) } + +// Bytes returns bytes of the piece ID +func (id PieceID) Bytes() []byte { return id[:] } + +// Derive a new PieceID from the current piece ID, the given storage node ID and piece number +func (id PieceID) Derive(storagenodeID NodeID, pieceNum int32) PieceID { + // TODO: should the secret / content be swapped? + mac := hmac.New(sha512.New, id.Bytes()) + _, _ = mac.Write(storagenodeID.Bytes()) // on hash.Hash write never returns an error + num := make([]byte, 4) + binary.BigEndian.PutUint32(num, uint32(pieceNum)) + _, _ = mac.Write(num) // on hash.Hash write never returns an error + var derived PieceID + copy(derived[:], mac.Sum(nil)) + return derived +} + +// Marshal serializes a piece ID +func (id PieceID) Marshal() ([]byte, error) { + return id.Bytes(), nil +} + +// MarshalTo serializes a piece ID into the passed byte slice +func (id *PieceID) MarshalTo(data []byte) (n int, err error) { + n = copy(data, id.Bytes()) + return n, nil +} + +// Unmarshal deserializes a piece ID +func (id *PieceID) Unmarshal(data []byte) error { + var err error + *id, err = PieceIDFromBytes(data) + return err +} + +// Size returns the length of a piece ID (implements gogo's custom type interface) +func (id *PieceID) Size() int { + return len(id) +} + +// MarshalJSON serializes a piece ID to a json string as bytes +func (id PieceID) MarshalJSON() ([]byte, error) { + return []byte(`"` + id.String() + `"`), nil +} + +// UnmarshalJSON deserializes a json string (as bytes) to a piece ID +func (id *PieceID) UnmarshalJSON(data []byte) error { + var unquoted string + err := json.Unmarshal(data, &unquoted) + if err != nil { + return err + } + + *id, err = PieceIDFromString(unquoted) + if err != nil { + return err + } + return nil +} + +// Value set a PieceID to a database field +func (id PieceID) Value() (driver.Value, error) 
{ + return id.Bytes(), nil +} + +// Scan extracts a PieceID from a database field +func (id *PieceID) Scan(src interface{}) (err error) { + b, ok := src.([]byte) + if !ok { + return ErrPieceID.New("PieceID Scan expects []byte") + } + n, err := PieceIDFromBytes(b) + *id = n + return err +} diff --git a/vendor/storj.io/common/storj/piecekey.go b/vendor/storj.io/common/storj/piecekey.go new file mode 100644 index 000000000..6dd5f917a --- /dev/null +++ b/vendor/storj.io/common/storj/piecekey.go @@ -0,0 +1,159 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "database/sql/driver" + + "github.com/zeebo/errs" + "golang.org/x/crypto/ed25519" +) + +// ErrPieceKey is used when something goes wrong with a piece key +var ErrPieceKey = errs.Class("piece key error") + +// PiecePublicKey is the unique identifier for pieces +type PiecePublicKey struct { + pub ed25519.PublicKey +} + +// PiecePrivateKey is the unique identifier for pieces +type PiecePrivateKey struct { + priv ed25519.PrivateKey +} + +// NewPieceKey creates a piece key pair +func NewPieceKey() (PiecePublicKey, PiecePrivateKey, error) { + pub, priv, err := ed25519.GenerateKey(nil) + + return PiecePublicKey{pub}, PiecePrivateKey{priv}, ErrPieceKey.Wrap(err) +} + +// PiecePublicKeyFromBytes converts bytes to a piece public key. +func PiecePublicKeyFromBytes(data []byte) (PiecePublicKey, error) { + if len(data) != ed25519.PublicKeySize { + return PiecePublicKey{}, ErrPieceKey.New("invalid public key length %v", len(data)) + } + return PiecePublicKey{ed25519.PublicKey(data)}, nil +} + +// PiecePrivateKeyFromBytes converts bytes to a piece private key. 
+func PiecePrivateKeyFromBytes(data []byte) (PiecePrivateKey, error) { + if len(data) != ed25519.PrivateKeySize { + return PiecePrivateKey{}, ErrPieceKey.New("invalid private key length %v", len(data)) + } + return PiecePrivateKey{ed25519.PrivateKey(data)}, nil +} + +// Sign signs the message with privateKey and returns a signature. +func (key PiecePrivateKey) Sign(data []byte) ([]byte, error) { + if len(key.priv) != ed25519.PrivateKeySize { + return nil, ErrPieceKey.New("invalid private key length %v", len(key.priv)) + } + return ed25519.Sign(key.priv, data), nil +} + +// Verify reports whether signature is a valid signature of message by publicKey. +func (key PiecePublicKey) Verify(data, signature []byte) error { + if len(key.pub) != ed25519.PublicKeySize { + return ErrPieceKey.New("invalid public key length %v", len(key.pub)) + } + if !ed25519.Verify(key.pub, data, signature) { + return ErrPieceKey.New("invalid signature") + } + return nil +} + +// Bytes returns bytes of the piece public key +func (key PiecePublicKey) Bytes() []byte { return key.pub[:] } + +// Bytes returns bytes of the piece private key +func (key PiecePrivateKey) Bytes() []byte { return key.priv[:] } + +// IsZero returns whether the key is empty. +func (key PiecePublicKey) IsZero() bool { return len(key.pub) == 0 } + +// IsZero returns whether the key is empty. 
+func (key PiecePrivateKey) IsZero() bool { return len(key.priv) == 0 } + +// Marshal serializes a piece public key +func (key PiecePublicKey) Marshal() ([]byte, error) { return key.Bytes(), nil } + +// Marshal serializes a piece private key +func (key PiecePrivateKey) Marshal() ([]byte, error) { return key.Bytes(), nil } + +// MarshalTo serializes a piece public key into the passed byte slice +func (key *PiecePublicKey) MarshalTo(data []byte) (n int, err error) { + n = copy(data, key.Bytes()) + return n, nil +} + +// MarshalTo serializes a piece private key into the passed byte slice +func (key *PiecePrivateKey) MarshalTo(data []byte) (n int, err error) { + n = copy(data, key.Bytes()) + return n, nil +} + +// Unmarshal deserializes a piece public key +func (key *PiecePublicKey) Unmarshal(data []byte) error { + // allow empty keys + if len(data) == 0 { + key.pub = nil + return nil + } + var err error + *key, err = PiecePublicKeyFromBytes(data) + return err +} + +// Unmarshal deserializes a piece private key +func (key *PiecePrivateKey) Unmarshal(data []byte) error { + // allow empty keys + if len(data) == 0 { + key.priv = nil + return nil + } + // NOTE(review): a duplicate `if len(data) == 0` check was removed here as + // dead code; the empty-input case is already fully handled above. These + // comment lines keep the patch hunk length unchanged. + var err error + *key, err = PiecePrivateKeyFromBytes(data) + return err +} + +// Size returns the length of a piece public key (implements gogo's custom type interface) +func (key *PiecePublicKey) Size() int { return len(key.pub) } + +// Size returns the length of a piece private key (implements gogo's custom type interface) +func (key *PiecePrivateKey) Size() int { return len(key.priv) } + +// Value set a PiecePublicKey to a database field +func (key PiecePublicKey) Value() (driver.Value, error) { + return key.Bytes(), nil +} + +// Value set a PiecePrivateKey to a database field +func (key PiecePrivateKey) Value() (driver.Value, error) { return key.Bytes(), nil } + +// Scan extracts a PiecePublicKey from a database field +func (key *PiecePublicKey) Scan(src interface{}) (err error) { + 
b, ok := src.([]byte) + if !ok { + return ErrPieceKey.New("PiecePublicKey Scan expects []byte") + } + n, err := PiecePublicKeyFromBytes(b) + *key = n + return err +} + +// Scan extracts a PiecePrivateKey from a database field +func (key *PiecePrivateKey) Scan(src interface{}) (err error) { + b, ok := src.([]byte) + if !ok { + return ErrPieceKey.New("PiecePrivateKey Scan expects []byte") + } + n, err := PiecePrivateKeyFromBytes(b) + *key = n + return err +} diff --git a/vendor/storj.io/common/storj/redundancy.go b/vendor/storj.io/common/storj/redundancy.go new file mode 100644 index 000000000..ad049a47f --- /dev/null +++ b/vendor/storj.io/common/storj/redundancy.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +// RedundancyScheme specifies the parameters and the algorithm for redundancy +type RedundancyScheme struct { + // Algorithm determines the algorithm to be used for redundancy. + Algorithm RedundancyAlgorithm + + // ShareSize is the size in bytes for each erasure shares. + ShareSize int32 + + // RequiredShares is the minimum number of shares required to recover a + // stripe, reed-solomon k. + RequiredShares int16 + // RepairShares is the minimum number of safe shares that can remain + // before a repair is triggered. + RepairShares int16 + // OptimalShares is the desired total number of shares for a segment. + OptimalShares int16 + // TotalShares is the number of shares to encode. If it is larger than + // OptimalShares, slower uploads of the excess shares will be aborted in + // order to improve performance. + TotalShares int16 +} + +// IsZero returns true if no field in the struct is set to non-zero value +func (scheme RedundancyScheme) IsZero() bool { + return scheme == (RedundancyScheme{}) +} + +// StripeSize is the number of bytes for a stripe. +// Stripes are erasure encoded and split into n shares, where we need k to +// reconstruct the stripe. 
Therefore a stripe size is the erasure share size +// times the required shares, k. +func (scheme RedundancyScheme) StripeSize() int32 { + return scheme.ShareSize * int32(scheme.RequiredShares) +} + +// DownloadNodes calculates the number of nodes needed to download in the +// presence of node failure based on t = k + (n-o)k/o. +func (scheme RedundancyScheme) DownloadNodes() int32 { + extra := int32(1) + + if scheme.OptimalShares > 0 { + extra = int32(((scheme.TotalShares - scheme.OptimalShares) * scheme.RequiredShares) / scheme.OptimalShares) + if extra == 0 { + // ensure there is at least one extra node, so we can have error detection/correction + extra = 1 + } + } + + needed := int32(scheme.RequiredShares) + extra + + if needed > int32(scheme.TotalShares) { + needed = int32(scheme.TotalShares) + } + return needed +} + +// RedundancyAlgorithm is the algorithm used for redundancy +type RedundancyAlgorithm byte + +// List of supported redundancy algorithms +const ( + InvalidRedundancyAlgorithm = RedundancyAlgorithm(iota) + ReedSolomon +) diff --git a/vendor/storj.io/common/storj/segment.go b/vendor/storj.io/common/storj/segment.go new file mode 100644 index 000000000..520450cbb --- /dev/null +++ b/vendor/storj.io/common/storj/segment.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +// SegmentPosition segment position in object +type SegmentPosition struct { + PartNumber int32 + Index int32 +} + +// SegmentListItem represents listed segment +type SegmentListItem struct { + Position SegmentPosition +} + +// SegmentDownloadInfo represents segment download information inline/remote +type SegmentDownloadInfo struct { + SegmentID SegmentID + Size int64 + EncryptedInlineData []byte + Next SegmentPosition + PiecePrivateKey PiecePrivateKey + + SegmentEncryption SegmentEncryption +} + +// SegmentEncryption represents segment encryption key and nonce +type SegmentEncryption struct { + EncryptedKeyNonce Nonce + EncryptedKey EncryptedPrivateKey +} diff --git a/vendor/storj.io/common/storj/segmentid.go b/vendor/storj.io/common/storj/segmentid.go new file mode 100644 index 000000000..8d9250e65 --- /dev/null +++ b/vendor/storj.io/common/storj/segmentid.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "encoding/base32" + + "github.com/zeebo/errs" +) + +// ErrSegmentID is used when something goes wrong with a segment ID +var ErrSegmentID = errs.Class("segment ID error") + +// segmentIDEncoding is base32 without padding +var segmentIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// SegmentID is the unique identifier for segment related to object +type SegmentID []byte + +// SegmentIDFromString decodes an base32 encoded +func SegmentIDFromString(s string) (SegmentID, error) { + idBytes, err := segmentIDEncoding.DecodeString(s) + if err != nil { + return SegmentID{}, ErrSegmentID.Wrap(err) + } + return SegmentIDFromBytes(idBytes) +} + +// SegmentIDFromBytes converts a byte slice into a segment ID +func SegmentIDFromBytes(b []byte) (SegmentID, error) { + // return error will be used in future implementation + id := make([]byte, len(b)) + copy(id, b) + return id, nil +} + +// IsZero returns whether segment ID is unassigned +func (id SegmentID) 
IsZero() bool { + return len(id) == 0 +} + +// String representation of the segment ID +func (id SegmentID) String() string { return segmentIDEncoding.EncodeToString(id.Bytes()) } + +// Bytes returns bytes of the segment ID +func (id SegmentID) Bytes() []byte { return id[:] } + +// Marshal serializes a segment ID (implements gogo's custom type interface) +func (id SegmentID) Marshal() ([]byte, error) { + return id.Bytes(), nil +} + +// MarshalTo serializes a segment ID into the passed byte slice (implements gogo's custom type interface) +func (id *SegmentID) MarshalTo(data []byte) (n int, err error) { + return copy(data, id.Bytes()), nil +} + +// Unmarshal deserializes a segment ID (implements gogo's custom type interface) +func (id *SegmentID) Unmarshal(data []byte) error { + var err error + *id, err = SegmentIDFromBytes(data) + return err +} + +// Size returns the length of a segment ID (implements gogo's custom type interface) +func (id SegmentID) Size() int { + return len(id) +} + +// MarshalJSON serializes a segment ID to a json string as bytes (implements gogo's custom type interface) +func (id SegmentID) MarshalJSON() ([]byte, error) { + return []byte(`"` + id.String() + `"`), nil +} + +// UnmarshalJSON deserializes a json string (as bytes) to a segment ID (implements gogo's custom type interface) +func (id *SegmentID) UnmarshalJSON(data []byte) error { + var err error + *id, err = SegmentIDFromString(string(data)) + return err +} diff --git a/vendor/storj.io/common/storj/serialnumber.go b/vendor/storj.io/common/storj/serialnumber.go new file mode 100644 index 000000000..525a742f0 --- /dev/null +++ b/vendor/storj.io/common/storj/serialnumber.go @@ -0,0 +1,117 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package storj + +import ( + "database/sql/driver" + "encoding/base32" + + "github.com/zeebo/errs" +) + +// ErrSerialNumber is used when something goes wrong with a serial number +var ErrSerialNumber = errs.Class("serial number error") + +// serialNumberEncoding is base32 without padding +var serialNumberEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// SerialNumber is the unique identifier for pieces +type SerialNumber [16]byte + +// SerialNumberFromString decodes a base32 encoded serial number +func SerialNumberFromString(s string) (SerialNumber, error) { + idBytes, err := serialNumberEncoding.DecodeString(s) + if err != nil { + return SerialNumber{}, ErrSerialNumber.Wrap(err) + } + return SerialNumberFromBytes(idBytes) +} + +// SerialNumberFromBytes converts a byte slice into a serial number +func SerialNumberFromBytes(b []byte) (SerialNumber, error) { + if len(b) != len(SerialNumber{}) { + return SerialNumber{}, ErrSerialNumber.New("not enough bytes to make a serial number; have %d, need %d", len(b), len(SerialNumber{})) + } + + var id SerialNumber + copy(id[:], b) + return id, nil +} + +// IsZero returns whether serial number is unassigned +func (id SerialNumber) IsZero() bool { + return id == SerialNumber{} +} + +// Less returns whether id is smaller than other in lexicographic order. 
+func (id SerialNumber) Less(other SerialNumber) bool { + for k, v := range id { + if v < other[k] { + return true + } else if v > other[k] { + return false + } + } + return false +} + +// String representation of the serial number +func (id SerialNumber) String() string { return serialNumberEncoding.EncodeToString(id.Bytes()) } + +// Bytes returns bytes of the serial number +func (id SerialNumber) Bytes() []byte { return id[:] } + +// Marshal serializes a serial number +func (id SerialNumber) Marshal() ([]byte, error) { + return id.Bytes(), nil +} + +// MarshalTo serializes a serial number into the passed byte slice +func (id *SerialNumber) MarshalTo(data []byte) (n int, err error) { + n = copy(data, id.Bytes()) + return n, nil +} + +// Unmarshal deserializes a serial number +func (id *SerialNumber) Unmarshal(data []byte) error { + var err error + *id, err = SerialNumberFromBytes(data) + return err +} + +// Size returns the length of a serial number (implements gogo's custom type interface) +func (id *SerialNumber) Size() int { + return len(id) +} + +// MarshalJSON serializes a serial number to a json string as bytes +func (id SerialNumber) MarshalJSON() ([]byte, error) { + return []byte(`"` + id.String() + `"`), nil +} + +// UnmarshalJSON deserializes a json string (as bytes) to a serial number +func (id *SerialNumber) UnmarshalJSON(data []byte) error { + var err error + *id, err = SerialNumberFromString(string(data)) + if err != nil { + return err + } + return nil +} + +// Value set a SerialNumber to a database field +func (id SerialNumber) Value() (driver.Value, error) { + return id.Bytes(), nil +} + +// Scan extracts a SerialNumber from a database field +func (id *SerialNumber) Scan(src interface{}) (err error) { + b, ok := src.([]byte) + if !ok { + return ErrSerialNumber.New("SerialNumber Scan expects []byte") + } + n, err := SerialNumberFromBytes(b) + *id = n + return err +} diff --git a/vendor/storj.io/common/storj/streamid.go 
b/vendor/storj.io/common/storj/streamid.go new file mode 100644 index 000000000..91c3a6bc9 --- /dev/null +++ b/vendor/storj.io/common/storj/streamid.go @@ -0,0 +1,101 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package storj + +import ( + "database/sql/driver" + "encoding/base32" + + "github.com/zeebo/errs" +) + +// ErrStreamID is used when something goes wrong with a stream ID +var ErrStreamID = errs.Class("stream ID error") + +// streamIDEncoding is base32 without padding +var streamIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// StreamID is the unique identifier for stream related to object +type StreamID []byte + +// StreamIDFromString decodes an base32 encoded +func StreamIDFromString(s string) (StreamID, error) { + idBytes, err := streamIDEncoding.DecodeString(s) + if err != nil { + return StreamID{}, ErrStreamID.Wrap(err) + } + return StreamIDFromBytes(idBytes) +} + +// StreamIDFromBytes converts a byte slice into a stream ID +func StreamIDFromBytes(b []byte) (StreamID, error) { + id := make([]byte, len(b)) + copy(id, b) + return id, nil +} + +// IsZero returns whether stream ID is unassigned +func (id StreamID) IsZero() bool { + return len(id) == 0 +} + +// String representation of the stream ID +func (id StreamID) String() string { return streamIDEncoding.EncodeToString(id.Bytes()) } + +// Bytes returns bytes of the stream ID +func (id StreamID) Bytes() []byte { return id[:] } + +// Marshal serializes a stream ID +func (id StreamID) Marshal() ([]byte, error) { + return id.Bytes(), nil +} + +// MarshalTo serializes a stream ID into the passed byte slice +func (id *StreamID) MarshalTo(data []byte) (n int, err error) { + n = copy(data, id.Bytes()) + return n, nil +} + +// Unmarshal deserializes a stream ID +func (id *StreamID) Unmarshal(data []byte) error { + var err error + *id, err = StreamIDFromBytes(data) + return err +} + +// Size returns the length of a stream ID (implements gogo's custom 
type interface) +func (id StreamID) Size() int { + return len(id) +} + +// MarshalJSON serializes a stream ID to a json string as bytes +func (id StreamID) MarshalJSON() ([]byte, error) { + return []byte(`"` + id.String() + `"`), nil +} + +// UnmarshalJSON deserializes a json string (as bytes) to a stream ID +func (id *StreamID) UnmarshalJSON(data []byte) error { + var err error + *id, err = StreamIDFromString(string(data)) + if err != nil { + return err + } + return nil +} + +// Value set a stream ID to a database field +func (id StreamID) Value() (driver.Value, error) { + return id.Bytes(), nil +} + +// Scan extracts a stream ID from a database field +func (id *StreamID) Scan(src interface{}) (err error) { + b, ok := src.([]byte) + if !ok { + return ErrStreamID.New("Stream ID Scan expects []byte") + } + n, err := StreamIDFromBytes(b) + *id = n + return err +} diff --git a/vendor/storj.io/common/sync2/cooldown.go b/vendor/storj.io/common/sync2/cooldown.go new file mode 100644 index 000000000..97c399226 --- /dev/null +++ b/vendor/storj.io/common/sync2/cooldown.go @@ -0,0 +1,139 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" +) + +// Cooldown implements an event that can only occur once in a given timeframe. +// +// Cooldown control methods PANICS after Close has been called and don't have any +// effect after Stop has been called. +// +// Start or Run (only one of them, not both) must be only called once. +type Cooldown struct { + noCopy noCopy // nolint: structcheck + + stopsent int32 + runexec int32 + + interval time.Duration + + init sync.Once + trigger chan struct{} + stopping chan struct{} + stopped chan struct{} +} + +// NewCooldown creates a new cooldown with the specified interval. 
+func NewCooldown(interval time.Duration) *Cooldown { + cooldown := &Cooldown{} + cooldown.SetInterval(interval) + return cooldown +} + +// SetInterval allows to change the interval before starting. +func (cooldown *Cooldown) SetInterval(interval time.Duration) { + cooldown.interval = interval +} + +func (cooldown *Cooldown) initialize() { + cooldown.init.Do(func() { + cooldown.stopped = make(chan struct{}) + cooldown.stopping = make(chan struct{}) + cooldown.trigger = make(chan struct{}, 1) + }) +} + +// Start runs the specified function with an errgroup. +func (cooldown *Cooldown) Start(ctx context.Context, group *errgroup.Group, fn func(ctx context.Context) error) { + atomic.StoreInt32(&cooldown.runexec, 1) + group.Go(func() error { + return cooldown.Run(ctx, fn) + }) +} + +// Run waits for a message on the trigger channel, then runs the specified function. +// Afterwards it will sleep for the cooldown duration and drain the trigger channel. +// +// Run PANICS if it's called after Stop has been called. +func (cooldown *Cooldown) Run(ctx context.Context, fn func(ctx context.Context) error) error { + atomic.StoreInt32(&cooldown.runexec, 1) + cooldown.initialize() + defer close(cooldown.stopped) + for { + // prioritize stopping messages + select { + case <-cooldown.stopping: + return nil + case <-ctx.Done(): + return ctx.Err() + default: + } + + // handle trigger message + select { + case <-cooldown.trigger: + // trigger the function + if err := fn(ctx); err != nil { + return err + } + if !Sleep(ctx, cooldown.interval) { + return ctx.Err() + } + + // drain the channel to prevent messages received during sleep from triggering the function again + select { + case <-cooldown.trigger: + default: + } + case <-ctx.Done(): + return ctx.Err() + case <-cooldown.stopping: + return nil + } + + } +} + +// Close closes all resources associated with it. +// +// It MUST NOT be called concurrently. 
+func (cooldown *Cooldown) Close() { + cooldown.Stop() + + if atomic.LoadInt32(&cooldown.runexec) == 1 { + <-cooldown.stopped + } + + close(cooldown.trigger) +} + +// Stop stops the cooldown permanently. +func (cooldown *Cooldown) Stop() { + cooldown.initialize() + if atomic.CompareAndSwapInt32(&cooldown.stopsent, 0, 1) { + close(cooldown.stopping) + } + + if atomic.LoadInt32(&cooldown.runexec) == 1 { + <-cooldown.stopped + } +} + +// Trigger attempts to run the cooldown function. +// If the timer has not expired, the function will not run. +func (cooldown *Cooldown) Trigger() { + cooldown.initialize() + select { + case cooldown.trigger <- struct{}{}: + default: + } +} diff --git a/vendor/storj.io/common/sync2/copy.go b/vendor/storj.io/common/sync2/copy.go new file mode 100644 index 000000000..360f97d4e --- /dev/null +++ b/vendor/storj.io/common/sync2/copy.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "io" + + "github.com/spacemonkeygo/monkit/v3" +) + +var mon = monkit.Package() + +type readerFunc func(p []byte) (n int, err error) + +func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) } + +// Copy implements copying with cancellation +func Copy(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) { + defer mon.Task()(&ctx)(&err) + written, err = io.Copy(dst, readerFunc(func(p []byte) (int, error) { + select { + case <-ctx.Done(): + return 0, ctx.Err() + default: + return src.Read(p) + } + })) + + return written, err +} diff --git a/vendor/storj.io/common/sync2/cycle.go b/vendor/storj.io/common/sync2/cycle.go new file mode 100644 index 000000000..058245518 --- /dev/null +++ b/vendor/storj.io/common/sync2/cycle.go @@ -0,0 +1,224 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information + +package sync2 + +import ( + "context" + "sync" + "sync/atomic" + "time" + + monkit "github.com/spacemonkeygo/monkit/v3" + "golang.org/x/sync/errgroup" +) + +// Cycle implements a controllable recurring event. +// +// Cycle control methods PANICS after Close has been called and don't have any +// effect after Stop has been called. +// +// Start or Run (only one of them, not both) must be only called once. +type Cycle struct { + noCopy noCopy // nolint: structcheck + + stopsent int32 + runexec int32 + + interval time.Duration + + ticker *time.Ticker + control chan interface{} + + stopping chan struct{} + stopped chan struct{} + + init sync.Once +} + +type ( + // cycle control messages + cyclePause struct{} + cycleContinue struct{} + cycleChangeInterval struct{ Interval time.Duration } + cycleTrigger struct{ done chan struct{} } +) + +// NewCycle creates a new cycle with the specified interval. +func NewCycle(interval time.Duration) *Cycle { + cycle := &Cycle{} + cycle.SetInterval(interval) + return cycle +} + +// SetInterval allows to change the interval before starting. +func (cycle *Cycle) SetInterval(interval time.Duration) { + cycle.interval = interval +} + +func (cycle *Cycle) initialize() { + cycle.init.Do(func() { + cycle.stopped = make(chan struct{}) + cycle.stopping = make(chan struct{}) + cycle.control = make(chan interface{}) + }) +} + +// Start runs the specified function with an errgroup. +func (cycle *Cycle) Start(ctx context.Context, group *errgroup.Group, fn func(ctx context.Context) error) { + atomic.CompareAndSwapInt32(&cycle.runexec, 0, 1) + group.Go(func() error { + return cycle.Run(ctx, fn) + }) +} + +// Run runs the specified in an interval. +// +// Every interval `fn` is started. +// When `fn` is not fast enough, it may skip some of those executions. +// +// Run PANICS if it's called after Stop has been called. 
+func (cycle *Cycle) Run(ctx context.Context, fn func(ctx context.Context) error) error { + atomic.CompareAndSwapInt32(&cycle.runexec, 0, 1) + cycle.initialize() + defer close(cycle.stopped) + + currentInterval := cycle.interval + cycle.ticker = time.NewTicker(currentInterval) + defer cycle.ticker.Stop() + + choreCtx := monkit.ResetContextSpan(ctx) + + if err := fn(choreCtx); err != nil { + return err + } + for { + // prioritize stopping messages + select { + case <-cycle.stopping: + return nil + + case <-ctx.Done(): + // handle control messages + return ctx.Err() + + default: + } + + // handle other messages as well + select { + + case message := <-cycle.control: + // handle control messages + + switch message := message.(type) { + + case cycleChangeInterval: + currentInterval = message.Interval + cycle.ticker.Stop() + cycle.ticker = time.NewTicker(currentInterval) + + case cyclePause: + cycle.ticker.Stop() + // ensure we don't have ticks left + select { + case <-cycle.ticker.C: + default: + } + + case cycleContinue: + cycle.ticker.Stop() + cycle.ticker = time.NewTicker(currentInterval) + + case cycleTrigger: + // trigger the function + if err := fn(choreCtx); err != nil { + return err + } + if message.done != nil { + close(message.done) + } + } + + case <-cycle.stopping: + return nil + + case <-ctx.Done(): + // handle control messages + return ctx.Err() + + case <-cycle.ticker.C: + // trigger the function + if err := fn(choreCtx); err != nil { + return err + } + } + } +} + +// Close closes all resources associated with it. +// +// It MUST NOT be called concurrently. 
+func (cycle *Cycle) Close() { + cycle.Stop() + + if atomic.LoadInt32(&cycle.runexec) == 1 { + <-cycle.stopped + } + + close(cycle.control) +} + +// sendControl sends a control message +func (cycle *Cycle) sendControl(message interface{}) { + cycle.initialize() + select { + case cycle.control <- message: + case <-cycle.stopped: + } +} + +// Stop stops the cycle permanently +func (cycle *Cycle) Stop() { + cycle.initialize() + if atomic.CompareAndSwapInt32(&cycle.stopsent, 0, 1) { + close(cycle.stopping) + } + + if atomic.LoadInt32(&cycle.runexec) == 1 { + <-cycle.stopped + } +} + +// ChangeInterval allows to change the ticker interval after it has started. +func (cycle *Cycle) ChangeInterval(interval time.Duration) { + cycle.sendControl(cycleChangeInterval{interval}) +} + +// Pause pauses the cycle. +func (cycle *Cycle) Pause() { + cycle.sendControl(cyclePause{}) +} + +// Restart restarts the ticker from 0. +func (cycle *Cycle) Restart() { + cycle.sendControl(cycleContinue{}) +} + +// Trigger ensures that the loop is done at least once. +// If it's currently running it waits for the previous to complete and then runs. +func (cycle *Cycle) Trigger() { + cycle.sendControl(cycleTrigger{}) +} + +// TriggerWait ensures that the loop is done at least once and waits for completion. +// If it's currently running it waits for the previous to complete and then runs. +func (cycle *Cycle) TriggerWait() { + done := make(chan struct{}) + + cycle.sendControl(cycleTrigger{done}) + select { + case <-done: + case <-cycle.stopped: + } +} diff --git a/vendor/storj.io/common/sync2/doc.go b/vendor/storj.io/common/sync2/doc.go new file mode 100644 index 000000000..13dba7d14 --- /dev/null +++ b/vendor/storj.io/common/sync2/doc.go @@ -0,0 +1,12 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +/*Package sync2 provides a set of functions and types for: + +* Having context aware functionalities which aren't present in the standard + library. 
+* For offloading memory through the file system. +* To control execution of tasks which can run repetitively, concurrently or + asynchronously. +*/ +package sync2 diff --git a/vendor/storj.io/common/sync2/fence.go b/vendor/storj.io/common/sync2/fence.go new file mode 100644 index 000000000..c6539f8c2 --- /dev/null +++ b/vendor/storj.io/common/sync2/fence.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "sync" +) + +// Fence allows to wait for something to happen. +type Fence struct { + noCopy noCopy // nolint: structcheck + + setup sync.Once + release sync.Once + done chan struct{} +} + +// init sets up the initial lock into wait +func (fence *Fence) init() { + fence.setup.Do(func() { + fence.done = make(chan struct{}) + }) +} + +// Release releases everyone from Wait +func (fence *Fence) Release() { + fence.init() + fence.release.Do(func() { close(fence.done) }) +} + +// Wait waits for wait to be unlocked. +// Returns true when it was successfully released. +func (fence *Fence) Wait(ctx context.Context) bool { + fence.init() + + select { + case <-fence.done: + return true + default: + select { + case <-ctx.Done(): + return false + case <-fence.done: + return true + } + } +} + +// Released returns whether the fence has been released. +func (fence *Fence) Released() bool { + fence.init() + + select { + case <-fence.done: + return true + default: + return false + } +} + +// Done returns channel that will be closed when the fence is released. +func (fence *Fence) Done() chan struct{} { + fence.init() + return fence.done +} diff --git a/vendor/storj.io/common/sync2/io.go b/vendor/storj.io/common/sync2/io.go new file mode 100644 index 000000000..06ce0f000 --- /dev/null +++ b/vendor/storj.io/common/sync2/io.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information + +package sync2 + +import ( + "io" + "os" + "sync/atomic" +) + +// ReadAtWriteAtCloser implements all io.ReaderAt, io.WriterAt and io.Closer +type ReadAtWriteAtCloser interface { + io.ReaderAt + io.WriterAt + io.Closer +} + +// PipeWriter allows closing the writer with an error +type PipeWriter interface { + io.WriteCloser + CloseWithError(reason error) error +} + +// PipeReader allows closing the reader with an error +type PipeReader interface { + io.ReadCloser + CloseWithError(reason error) error +} + +// memory implements ReadAtWriteAtCloser on a memory buffer +type memory []byte + +// Size returns size of memory buffer +func (memory memory) Size() int { return len(memory) } + +// ReadAt implements io.ReaderAt methods +func (memory memory) ReadAt(data []byte, at int64) (amount int, err error) { + if at > int64(len(memory)) { + return 0, io.ErrClosedPipe + } + amount = copy(data, memory[at:]) + return amount, nil +} + +// WriteAt implements io.WriterAt methods +func (memory memory) WriteAt(data []byte, at int64) (amount int, err error) { + if at > int64(len(memory)) { + return 0, io.ErrClosedPipe + } + amount = copy(memory[at:], data) + return amount, nil +} + +// Close implements io.Closer implementation +func (memory memory) Close() error { return nil } + +// offsetFile implements ReadAt, WriteAt offset to the file with reference counting +type offsetFile struct { + file *os.File + offset int64 + open *int64 // number of handles open +} + +// ReadAt implements io.ReaderAt methods +func (file offsetFile) ReadAt(data []byte, at int64) (amount int, err error) { + return file.file.ReadAt(data, file.offset+at) +} + +// WriteAt implements io.WriterAt methods +func (file offsetFile) WriteAt(data []byte, at int64) (amount int, err error) { + return file.file.WriteAt(data, file.offset+at) +} + +// Close implements io.Closer methods +func (file offsetFile) Close() error { + if atomic.AddInt64(file.open, -1) == 0 { + return 
file.file.Close() + } + return nil +} diff --git a/vendor/storj.io/common/sync2/limiter.go b/vendor/storj.io/common/sync2/limiter.go new file mode 100644 index 000000000..d311ee223 --- /dev/null +++ b/vendor/storj.io/common/sync2/limiter.go @@ -0,0 +1,52 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "sync" +) + +// Limiter implements concurrent goroutine limiting. +type Limiter struct { + noCopy noCopy // nolint: structcheck + + limit chan struct{} + working sync.WaitGroup +} + +// NewLimiter creates a new limiter with limit set to n. +func NewLimiter(n int) *Limiter { + limiter := &Limiter{} + limiter.limit = make(chan struct{}, n) + return limiter +} + +// Go tries to start fn as a goroutine. +// When the limit is reached it will wait until it can run it +// or the context is canceled. +func (limiter *Limiter) Go(ctx context.Context, fn func()) bool { + select { + case limiter.limit <- struct{}{}: + case <-ctx.Done(): + return false + } + + limiter.working.Add(1) + go func() { + defer func() { + <-limiter.limit + limiter.working.Done() + }() + + fn() + }() + + return true +} + +// Wait waits for all running goroutines to finish. +func (limiter *Limiter) Wait() { + limiter.working.Wait() +} diff --git a/vendor/storj.io/common/sync2/nocopy.go b/vendor/storj.io/common/sync2/nocopy.go new file mode 100644 index 000000000..8d82926a4 --- /dev/null +++ b/vendor/storj.io/common/sync2/nocopy.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +// noCopy is used to ensure that we don't copy things that shouldn't +// be copied. +// +// See https://golang.org/issues/8005#issuecomment-190753527. +// +// Currently users of noCopy must use "// nolint: structcheck", +// because golint-ci does not handle this correctly. 
+type noCopy struct{} + +func (noCopy) Lock() {} diff --git a/vendor/storj.io/common/sync2/parent_child_limiter.go b/vendor/storj.io/common/sync2/parent_child_limiter.go new file mode 100644 index 000000000..6f3821150 --- /dev/null +++ b/vendor/storj.io/common/sync2/parent_child_limiter.go @@ -0,0 +1,59 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "sync" +) + +// ParentLimiter limits the concurrent goroutines that children run. +// See Child method. +type ParentLimiter struct { + limiter *Limiter +} + +// NewParentLimiter creates a new ParentLimiter with limit set to n. +func NewParentLimiter(n int) *ParentLimiter { + return &ParentLimiter{ + limiter: NewLimiter(n), + } +} + +// Child create a new parent's child. +func (parent *ParentLimiter) Child() *ChildLimiter { + return &ChildLimiter{ + parentLimiter: parent.limiter, + } +} + +// Wait waits for all the children's running goroutines to finish. +func (parent *ParentLimiter) Wait() { + parent.limiter.Wait() +} + +// ChildLimiter limits concurrent goroutines by its parent limit +// (ParentLimiter). +type ChildLimiter struct { + parentLimiter *Limiter + working sync.WaitGroup +} + +// Go tries to start fn as a goroutine. +// When the parent limit is reached it will wait until it can run it or the +// context is canceled. +// Cancel the context only interrupt the child goroutines waiting to run. +func (child *ChildLimiter) Go(ctx context.Context, fn func()) bool { + child.working.Add(1) + + return child.parentLimiter.Go(ctx, func() { + defer child.working.Done() + fn() + }) +} + +// Wait waits for all the child's running goroutines to finish. 
+func (child *ChildLimiter) Wait() { + child.working.Wait() +} diff --git a/vendor/storj.io/common/sync2/pipe.go b/vendor/storj.io/common/sync2/pipe.go new file mode 100644 index 000000000..5e639d65e --- /dev/null +++ b/vendor/storj.io/common/sync2/pipe.go @@ -0,0 +1,280 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "sync" +) + +// pipe is a io.Reader/io.Writer pipe backed by ReadAtWriteAtCloser +type pipe struct { + noCopy noCopy // nolint: structcheck + + buffer ReadAtWriteAtCloser + + mu sync.Mutex + nodata sync.Cond + read int64 + write int64 + limit int64 + + writerDone bool + writerErr error + + readerDone bool + readerErr error +} + +// NewPipeFile returns a pipe that uses file-system to offload memory +func NewPipeFile(tempdir string) (PipeReader, PipeWriter, error) { + tempfile, err := ioutil.TempFile(tempdir, "filepipe") + if err != nil { + return nil, nil, err + } + + handles := int64(2) + pipe := &pipe{ + buffer: &offsetFile{ + file: tempfile, + open: &handles, + }, + limit: math.MaxInt64, + } + pipe.nodata.L = &pipe.mu + + return pipeReader{pipe}, pipeWriter{pipe}, nil +} + +// NewPipeMemory returns a pipe that uses an in-memory buffer +func NewPipeMemory(pipeSize int64) (PipeReader, PipeWriter, error) { + pipe := &pipe{ + buffer: make(memory, pipeSize), + limit: pipeSize, + } + pipe.nodata.L = &pipe.mu + return pipeReader{pipe}, pipeWriter{pipe}, nil +} + +type pipeReader struct{ pipe *pipe } +type pipeWriter struct{ pipe *pipe } + +// Close implements io.Reader Close +func (reader pipeReader) Close() error { return reader.CloseWithError(nil) } + +// Close implements io.Writer Close +func (writer pipeWriter) Close() error { return writer.CloseWithError(nil) } + +// CloseWithError implements closing with error +func (reader pipeReader) CloseWithError(err error) error { + if err == nil { + err = io.ErrClosedPipe + } + + pipe := reader.pipe + 
pipe.mu.Lock() + if pipe.readerDone { + pipe.mu.Unlock() + return io.ErrClosedPipe + } + pipe.readerDone = true + pipe.readerErr = err + pipe.mu.Unlock() + + return pipe.buffer.Close() +} + +// CloseWithError implements closing with error +func (writer pipeWriter) CloseWithError(err error) error { + if err == nil { + err = io.EOF + } + + pipe := writer.pipe + pipe.mu.Lock() + if pipe.writerDone { + pipe.mu.Unlock() + return io.ErrClosedPipe + } + pipe.writerDone = true + pipe.writerErr = err + pipe.nodata.Broadcast() + pipe.mu.Unlock() + + return pipe.buffer.Close() +} + +// Write writes to the pipe returning io.ErrClosedPipe when pipeSize is reached +func (writer pipeWriter) Write(data []byte) (n int, err error) { + pipe := writer.pipe + pipe.mu.Lock() + + // has the reader finished? + if pipe.readerDone { + pipe.mu.Unlock() + return 0, pipe.readerErr + } + + // have we closed already + if pipe.writerDone { + pipe.mu.Unlock() + return 0, io.ErrClosedPipe + } + + // check how much do they want to write + canWrite := pipe.limit - pipe.write + + // no more room to write + if canWrite == 0 { + pipe.mu.Unlock() + return 0, io.ErrClosedPipe + } + + // figure out how much to write + toWrite := int64(len(data)) + if toWrite > canWrite { + toWrite = canWrite + } + + writeAt := pipe.write + pipe.mu.Unlock() + + // write data to buffer + writeAmount, err := pipe.buffer.WriteAt(data[:toWrite], writeAt) + + pipe.mu.Lock() + // update writing head + pipe.write += int64(writeAmount) + // wake up reader + pipe.nodata.Broadcast() + // check whether we have finished + done := pipe.write >= pipe.limit + pipe.mu.Unlock() + + if err == nil && done { + err = io.ErrClosedPipe + } + return writeAmount, err +} + +// Read reads from the pipe returning io.EOF when writer is closed or pipeSize is reached +func (reader pipeReader) Read(data []byte) (n int, err error) { + pipe := reader.pipe + pipe.mu.Lock() + // wait until we have something to read + for pipe.read >= pipe.write { + // has the 
writer finished? + if pipe.writerDone { + pipe.mu.Unlock() + return 0, pipe.writerErr + } + + // have we closed already + if pipe.readerDone { + pipe.mu.Unlock() + return 0, io.ErrClosedPipe + } + + // have we run out of the limit + if pipe.read >= pipe.limit { + pipe.mu.Unlock() + return 0, io.EOF + } + + // ok, lets wait + pipe.nodata.Wait() + } + + // how much there's available for reading + canRead := pipe.write - pipe.read + // how much do they want to read? + toRead := int64(len(data)) + if toRead > canRead { + toRead = canRead + } + readAt := pipe.read + pipe.mu.Unlock() + + // read data + readAmount, err := pipe.buffer.ReadAt(data[:toRead], readAt) + + pipe.mu.Lock() + // update info on how much we have read + pipe.read += int64(readAmount) + done := pipe.read >= pipe.limit + pipe.mu.Unlock() + + if err == nil && done { + err = io.EOF + } + return readAmount, err +} + +// MultiPipe is a multipipe backed by a single file +type MultiPipe struct { + pipes []pipe +} + +// NewMultiPipeFile returns a new MultiPipe that is created in tempdir +// if tempdir == "" the fill will be created it into os.TempDir +func NewMultiPipeFile(tempdir string, pipeCount, pipeSize int64) (*MultiPipe, error) { + tempfile, err := ioutil.TempFile(tempdir, "multifilepipe") + if err != nil { + return nil, err + } + + err = tempfile.Truncate(pipeCount * pipeSize) + if err != nil { + closeErr := tempfile.Close() + if closeErr != nil { + return nil, fmt.Errorf("%v/%v", err, closeErr) + } + return nil, err + } + + multipipe := &MultiPipe{ + pipes: make([]pipe, pipeCount), + } + + handles := 2 * pipeCount + for i := range multipipe.pipes { + pipe := &multipipe.pipes[i] + pipe.buffer = offsetFile{ + file: tempfile, + offset: int64(i) * pipeSize, + open: &handles, + } + pipe.limit = pipeSize + pipe.nodata.L = &pipe.mu + } + + return multipipe, nil +} + +// NewMultiPipeMemory returns a new MultiPipe that is using a memory buffer +func NewMultiPipeMemory(pipeCount, pipeSize int64) (*MultiPipe, 
error) { + buffer := make(memory, pipeCount*pipeSize) + + multipipe := &MultiPipe{ + pipes: make([]pipe, pipeCount), + } + + for i := range multipipe.pipes { + pipe := &multipipe.pipes[i] + pipe.buffer = buffer[i*int(pipeSize) : (i+1)*int(pipeSize)] + pipe.limit = pipeSize + pipe.nodata.L = &pipe.mu + } + + return multipipe, nil +} + +// Pipe returns the two ends of a block stream pipe +func (multipipe *MultiPipe) Pipe(index int) (PipeReader, PipeWriter) { + pipe := &multipipe.pipes[index] + return pipeReader{pipe}, pipeWriter{pipe} +} diff --git a/vendor/storj.io/common/sync2/semaphore.go b/vendor/storj.io/common/sync2/semaphore.go new file mode 100644 index 000000000..bd684cdac --- /dev/null +++ b/vendor/storj.io/common/sync2/semaphore.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + + "golang.org/x/sync/semaphore" +) + +// Semaphore implements a closable semaphore +type Semaphore struct { + noCopy noCopy // nolint: structcheck + + ctx context.Context + close func() + sema *semaphore.Weighted +} + +// NewSemaphore creates a semaphore with the specified size. +func NewSemaphore(size int) *Semaphore { + sema := &Semaphore{} + sema.Init(size) + return sema +} + +// Init initializes semaphore to the specified size. +func (sema *Semaphore) Init(size int) { + sema.ctx, sema.close = context.WithCancel(context.Background()) + sema.sema = semaphore.NewWeighted(int64(size)) +} + +// Close closes the semaphore from further use. +func (sema *Semaphore) Close() { + sema.close() +} + +// Lock locks the semaphore. +func (sema *Semaphore) Lock() bool { + return sema.sema.Acquire(sema.ctx, 1) == nil +} + +// Unlock unlocks the semaphore. 
+func (sema *Semaphore) Unlock() { + sema.sema.Release(1) +} diff --git a/vendor/storj.io/common/sync2/sleep.go b/vendor/storj.io/common/sync2/sleep.go new file mode 100644 index 000000000..586a05e99 --- /dev/null +++ b/vendor/storj.io/common/sync2/sleep.go @@ -0,0 +1,22 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "context" + "time" +) + +// Sleep implements sleeping with cancellation +func Sleep(ctx context.Context, duration time.Duration) bool { + timer := time.NewTimer(duration) + defer timer.Stop() + + select { + case <-ctx.Done(): + return false + case <-timer.C: + return true + } +} diff --git a/vendor/storj.io/common/sync2/success_threshold.go b/vendor/storj.io/common/sync2/success_threshold.go new file mode 100644 index 000000000..be58e93d6 --- /dev/null +++ b/vendor/storj.io/common/sync2/success_threshold.go @@ -0,0 +1,109 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package sync2 + +import ( + "context" + "math" + "sync" + "sync/atomic" + + "github.com/zeebo/errs" +) + +// SuccessThreshold tracks task formed by a known amount of concurrent tasks. +// It notifies the caller when reached a specific successful threshold without +// interrupting the remaining tasks. +type SuccessThreshold struct { + noCopy noCopy // nolint: structcheck + + toSucceed int64 + pending int64 + + successes int64 + failures int64 + + done chan struct{} + once sync.Once +} + +// NewSuccessThreshold creates a SuccessThreshold with the tasks number and +// successThreshold. +// +// It returns an error if tasks is less or equal than 1 or successThreshold +// is less or equal than 0 or greater or equal than 1. +func NewSuccessThreshold(tasks int, successThreshold float64) (*SuccessThreshold, error) { + switch { + case tasks <= 1: + return nil, errs.New( + "invalid number of tasks. 
It must be greater than 1, got %d", tasks, + ) + case successThreshold <= 0 || successThreshold > 1: + return nil, errs.New( + "invalid successThreshold. It must be greater than 0 and less or equal to 1, got %f", successThreshold, + ) + } + + tasksToSuccess := int64(math.Ceil(float64(tasks) * successThreshold)) + + // just in case of floating point issues + if tasksToSuccess > int64(tasks) { + tasksToSuccess = int64(tasks) + } + + return &SuccessThreshold{ + toSucceed: tasksToSuccess, + pending: int64(tasks), + done: make(chan struct{}), + }, nil +} + +// Success tells the SuccessThreshold that one tasks was successful. +func (threshold *SuccessThreshold) Success() { + atomic.AddInt64(&threshold.successes, 1) + + if atomic.AddInt64(&threshold.toSucceed, -1) <= 0 { + threshold.markAsDone() + } + + if atomic.AddInt64(&threshold.pending, -1) <= 0 { + threshold.markAsDone() + } +} + +// Failure tells the SuccessThreshold that one task was a failure. +func (threshold *SuccessThreshold) Failure() { + atomic.AddInt64(&threshold.failures, 1) + + if atomic.AddInt64(&threshold.pending, -1) <= 0 { + threshold.markAsDone() + } +} + +// Wait blocks the caller until the successThreshold is reached or all the tasks +// have finished. +func (threshold *SuccessThreshold) Wait(ctx context.Context) { + select { + case <-ctx.Done(): + case <-threshold.done: + } +} + +// markAsDone finalizes threshold closing the completed channel just once. +// It's safe to be called multiple times. +func (threshold *SuccessThreshold) markAsDone() { + threshold.once.Do(func() { + close(threshold.done) + }) +} + +// SuccessCount returns the number of successes so far. +func (threshold *SuccessThreshold) SuccessCount() int { + return int(atomic.LoadInt64(&threshold.successes)) +} + +// FailureCount returns the number of failures so far. 
+func (threshold *SuccessThreshold) FailureCount() int { + return int(atomic.LoadInt64(&threshold.failures)) +} diff --git a/vendor/storj.io/common/sync2/tee.go b/vendor/storj.io/common/sync2/tee.go new file mode 100644 index 000000000..2e333d26e --- /dev/null +++ b/vendor/storj.io/common/sync2/tee.go @@ -0,0 +1,204 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information + +package sync2 + +import ( + "io" + "sync" + "sync/atomic" + + "github.com/calebcase/tmpfile" +) + +type tee struct { + noCopy noCopy // nolint: structcheck + + buffer ReadAtWriteAtCloser + open *int64 + + mu sync.Mutex + nodata sync.Cond + noreader sync.Cond + + maxRead int64 + write int64 + + writerDone bool + writerErr error +} + +// NewTeeFile returns a tee that uses file-system to offload memory +func NewTeeFile(readers int, tempdir string) ([]PipeReader, PipeWriter, error) { + file, err := tmpfile.New(tempdir, "tee") + if err != nil { + return nil, nil, err + } + + handles := int64(readers + 1) // +1 for the writer + + buffer := &offsetFile{ + file: file, + open: &handles, + } + + return newTee(buffer, readers, &handles) +} + +// NewTeeInmemory returns a tee that uses inmemory +func NewTeeInmemory(readers int, allocMemory int64) ([]PipeReader, PipeWriter, error) { + handles := int64(readers + 1) // +1 for the writer + memory := memory(make([]byte, allocMemory)) + return newTee(memory, readers, &handles) +} + +func newTee(buffer ReadAtWriteAtCloser, readers int, open *int64) ([]PipeReader, PipeWriter, error) { + tee := &tee{ + buffer: buffer, + open: open, + } + tee.nodata.L = &tee.mu + tee.noreader.L = &tee.mu + + teeReaders := make([]PipeReader, readers) + for i := 0; i < readers; i++ { + teeReaders[i] = &teeReader{tee: tee} + } + + return teeReaders, &teeWriter{tee}, nil +} + +type teeReader struct { + tee *tee + pos int64 + closed int32 +} + +type teeWriter struct{ tee *tee } + +// Read reads from the tee returning io.EOF when writer is closed or bufSize is 
reached. +// +// It will block if the writer has not provided the data yet. +func (reader *teeReader) Read(data []byte) (n int, err error) { + tee := reader.tee + tee.mu.Lock() + + // fail fast on writer error + if tee.writerErr != nil && tee.writerErr != io.EOF { + tee.mu.Unlock() + return 0, tee.writerErr + } + + toRead := int64(len(data)) + end := reader.pos + toRead + + if end > tee.maxRead { + tee.maxRead = end + tee.noreader.Broadcast() + } + + // wait until we have any data to read + for reader.pos >= tee.write { + // has the writer finished? + if tee.writerDone { + tee.mu.Unlock() + return 0, tee.writerErr + } + + // ok, let's wait + tee.nodata.Wait() + } + + // how much there's available for reading + canRead := tee.write - reader.pos + if toRead > canRead { + toRead = canRead + } + readAt := reader.pos + tee.mu.Unlock() + + // read data + readAmount, err := tee.buffer.ReadAt(data[:toRead], readAt) + reader.pos += int64(readAmount) + + return readAmount, err +} + +// Write writes to the buffer returning io.ErrClosedPipe when limit is reached +// +// It will block until at least one reader require the data. +func (writer *teeWriter) Write(data []byte) (n int, err error) { + tee := writer.tee + tee.mu.Lock() + + // have we closed already + if tee.writerDone { + tee.mu.Unlock() + return 0, io.ErrClosedPipe + } + + for tee.write > tee.maxRead { + // are all readers already closed? 
+ if atomic.LoadInt64(tee.open) <= 1 { + tee.mu.Unlock() + return 0, io.ErrClosedPipe + } + // wait until new data is required by any reader + tee.noreader.Wait() + } + + writeAt := tee.write + tee.mu.Unlock() + + // write data to buffer + writeAmount, err := tee.buffer.WriteAt(data, writeAt) + + tee.mu.Lock() + // update writing head + tee.write += int64(writeAmount) + // wake up reader + tee.nodata.Broadcast() + tee.mu.Unlock() + + return writeAmount, err +} + +// Close implements io.Reader Close +func (reader *teeReader) Close() error { return reader.CloseWithError(nil) } + +// Close implements io.Writer Close +func (writer *teeWriter) Close() error { return writer.CloseWithError(nil) } + +// CloseWithError implements closing with error +func (reader *teeReader) CloseWithError(reason error) (err error) { + tee := reader.tee + if atomic.CompareAndSwapInt32(&reader.closed, 0, 1) { + err = tee.buffer.Close() + } + + tee.mu.Lock() + tee.noreader.Broadcast() + tee.mu.Unlock() + + return err +} + +// CloseWithError implements closing with error +func (writer *teeWriter) CloseWithError(reason error) error { + if reason == nil { + reason = io.EOF + } + + tee := writer.tee + tee.mu.Lock() + if tee.writerDone { + tee.mu.Unlock() + return io.ErrClosedPipe + } + tee.writerDone = true + tee.writerErr = reason + tee.nodata.Broadcast() + tee.mu.Unlock() + + return tee.buffer.Close() +} diff --git a/vendor/storj.io/common/sync2/throttle.go b/vendor/storj.io/common/sync2/throttle.go new file mode 100644 index 000000000..9c5d4143a --- /dev/null +++ b/vendor/storj.io/common/sync2/throttle.go @@ -0,0 +1,130 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information + +package sync2 + +import ( + "sync" +) + +// Throttle implements two-sided throttling, between a consumer and producer +type Throttle struct { + noCopy noCopy // nolint: structcheck + + mu sync.Mutex + consumer sync.Cond + producer sync.Cond + + // error tracking for terminating Consume and Allocate + errs []error + + // how much is available in the throttle + // consumer decreases availability and blocks when it's below zero + // producer increses availability and blocks as needed + available int64 +} + +// NewThrottle returns a new Throttle primitive +func NewThrottle() *Throttle { + var throttle Throttle + throttle.consumer.L = &throttle.mu + throttle.producer.L = &throttle.mu + return &throttle +} + +// Consume subtracts amount from the throttle +func (throttle *Throttle) Consume(amount int64) error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + throttle.available -= amount + throttle.producer.Signal() + return throttle.combinedError() +} + +// ConsumeOrWait tries to consume at most maxAmount +func (throttle *Throttle) ConsumeOrWait(maxAmount int64) (int64, error) { + throttle.mu.Lock() + defer throttle.mu.Unlock() + + for throttle.alive() && throttle.available <= 0 { + throttle.consumer.Wait() + } + + available := throttle.available + if available > maxAmount { + available = maxAmount + } + throttle.available -= available + throttle.producer.Signal() + + return available, throttle.combinedError() +} + +// WaitUntilAbove waits until availability drops below limit +func (throttle *Throttle) WaitUntilAbove(limit int64) error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + for throttle.alive() && throttle.available <= limit { + throttle.consumer.Wait() + } + return throttle.combinedError() +} + +// Produce adds amount to the throttle +func (throttle *Throttle) Produce(amount int64) error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + throttle.available += amount + throttle.consumer.Signal() + return 
throttle.combinedError() +} + +// ProduceAndWaitUntilBelow adds amount to the throttle and waits until it's below the given threshold +func (throttle *Throttle) ProduceAndWaitUntilBelow(amount, limit int64) error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + throttle.available += amount + throttle.consumer.Signal() + for throttle.alive() && throttle.available >= limit { + throttle.producer.Wait() + } + return throttle.combinedError() +} + +// WaitUntilBelow waits until availability drops below limit +func (throttle *Throttle) WaitUntilBelow(limit int64) error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + for throttle.alive() && throttle.available >= limit { + throttle.producer.Wait() + } + return throttle.combinedError() +} + +// Fail stops both consumer and allocator +func (throttle *Throttle) Fail(err error) { + throttle.mu.Lock() + defer throttle.mu.Unlock() + + throttle.errs = append(throttle.errs, err) + throttle.consumer.Signal() + throttle.producer.Signal() +} + +// must hold mutex when calling this +func (throttle *Throttle) alive() bool { return len(throttle.errs) == 0 } + +func (throttle *Throttle) combinedError() error { + if len(throttle.errs) == 0 { + return nil + } + // TODO: combine errors + return throttle.errs[0] +} + +// Err returns the finishing error +func (throttle *Throttle) Err() error { + throttle.mu.Lock() + defer throttle.mu.Unlock() + return throttle.combinedError() +} diff --git a/vendor/storj.io/common/sync2/workgroup.go b/vendor/storj.io/common/sync2/workgroup.go new file mode 100644 index 000000000..8507a7740 --- /dev/null +++ b/vendor/storj.io/common/sync2/workgroup.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information + +package sync2 + +import ( + "sync" +) + +// WorkGroup implements waitable and closable group of workers +type WorkGroup struct { + noCopy noCopy // nolint: structcheck + + mu sync.Mutex + cond sync.Cond + + initialized bool + closed bool + workers int +} + +// init initializes work group +func (group *WorkGroup) init() { + if !group.initialized { + group.cond.L = &group.mu + } +} + +// Go starts func and tracks the execution. +// Returns false when WorkGroup has been closed. +func (group *WorkGroup) Go(fn func()) bool { + if !group.Start() { + return false + } + go func() { + defer group.Done() + fn() + }() + return true +} + +// Start returns true when work can be started +func (group *WorkGroup) Start() bool { + group.mu.Lock() + defer group.mu.Unlock() + + group.init() + if group.closed { + return false + } + group.workers++ + return true +} + +// Done finishes a pending work item +func (group *WorkGroup) Done() { + group.mu.Lock() + defer group.mu.Unlock() + + group.workers-- + if group.workers < 0 { + panic("worker count below zero") + } + if group.workers == 0 { + group.cond.Broadcast() + } +} + +// Wait waits for all workers to finish. +func (group *WorkGroup) Wait() { + group.mu.Lock() + defer group.mu.Unlock() + + group.init() + + for group.workers != 0 { + group.cond.Wait() + } +} + +// Close prevents from new work being started. +func (group *WorkGroup) Close() { + group.mu.Lock() + defer group.mu.Unlock() + + group.init() + group.closed = true +} diff --git a/vendor/storj.io/common/uuid/db.go b/vendor/storj.io/common/uuid/db.go new file mode 100644 index 000000000..3e44d728a --- /dev/null +++ b/vendor/storj.io/common/uuid/db.go @@ -0,0 +1,62 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uuid + +import ( + "database/sql/driver" +) + +// Value implements sql/driver.Valuer interface. 
+func (uuid UUID) Value() (driver.Value, error) { + return uuid[:], nil +} + +// Scan implements sql.Scanner interface. +func (uuid *UUID) Scan(value interface{}) error { + switch value := value.(type) { + case []byte: + x, err := FromBytes(value) + if err != nil { + return Error.Wrap(err) + } + *uuid = x + return nil + case string: + x, err := FromString(value) + if err != nil { + return Error.Wrap(err) + } + *uuid = x + return nil + default: + return Error.New("unable to scan %T into UUID", value) + } +} + +// NullUUID represents a UUID that may be null. +// NullUUID implements the Scanner interface so it can be used +// as a scan destination, similar to sql.NullString. +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Value implements sql/driver.Valuer interface. +func (n NullUUID) Value() (driver.Value, error) { + if !n.Valid { + return nil, nil + } + return n.UUID.Value() +} + +// Scan implements sql.Scanner interface. +func (n *NullUUID) Scan(value interface{}) error { + if value == nil { + n.UUID, n.Valid = UUID{}, false + return nil + } + + n.Valid = true + return n.UUID.Scan(value) +} diff --git a/vendor/storj.io/common/uuid/fuzz.go b/vendor/storj.io/common/uuid/fuzz.go new file mode 100644 index 000000000..39d812f6e --- /dev/null +++ b/vendor/storj.io/common/uuid/fuzz.go @@ -0,0 +1,28 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build gofuzz + +package uuid + +// To run fuzzing tests: +// +// clone github.com/storj/fuzz-corpus +// +// Install fuzzing tools: +// GO111MODULE=off go get github.com/dvyukov/go-fuzz/... +// +// Build binaries: +// go-fuzz-build . +// +// Run with test corpus: +// go-fuzz -bin uuid-fuzz.zip -workdir $FUZZCORPUS/uuid/testdata + +// Fuzz implements a simple fuzz test for uuid.Parse. 
+func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/storj.io/common/uuid/uuid.go b/vendor/storj.io/common/uuid/uuid.go new file mode 100644 index 000000000..b8a290176 --- /dev/null +++ b/vendor/storj.io/common/uuid/uuid.go @@ -0,0 +1,141 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package uuid implements UUID v4 based on RFC4122. +package uuid + +import ( + "crypto/rand" + "encoding/hex" + "io" + + "github.com/zeebo/errs" +) + +// Error is the default error class for uuid. +var Error = errs.Class("uuid") + +// UUID is big-endian encoded UUID. +// +// UUID can be of any version or variant. +type UUID [16]byte + +// New returns a random UUID (version 4 variant 2). +func New() (UUID, error) { + return newRandomFromReader(rand.Reader) +} + +// newRandomFromReader returns a random UUID (version 4 variant 2) +// using a custom reader. +func newRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return uuid, Error.Wrap(err) + } + + // version 4, variant 2 + uuid[6] = (uuid[6] & 0x0f) | 0x40 + uuid[8] = (uuid[8] & 0x3f) | 0x80 + return uuid, nil +} + +// IsZero returns true when all bytes in uuid are 0. +func (uuid UUID) IsZero() bool { return uuid == UUID{} } + +// String returns uuid in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (uuid UUID) String() string { + s := [36]byte{8: '-', 13: '-', 18: '-', 23: '-'} + hex.Encode(s[0:8], uuid[0:4]) + hex.Encode(s[9:13], uuid[4:6]) + hex.Encode(s[14:18], uuid[6:8]) + hex.Encode(s[19:23], uuid[8:10]) + hex.Encode(s[24:36], uuid[10:16]) + return string(s[:]) +} + +// FromBytes converts big-endian raw-bytes to an UUID. +// +// FromBytes allows for any version or variant of an UUID. 
+func FromBytes(bytes []byte) (UUID, error) { + var uuid UUID + if len(uuid) != len(bytes) { + return uuid, Error.New("bytes have wrong length %d expected %d", len(bytes), len(uuid)) + } + copy(uuid[:], bytes) + return uuid, nil +} + +// FromString parses "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" string form. +// +// FromString allows for any version or variant of an UUID. +func FromString(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + return uuid, Error.New("invalid string length %d expected %d", len(s), 36) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, Error.New("invalid string") + } + + var err error + _, err = hex.Decode(uuid[0:4], []byte(s)[0:8]) + if err != nil { + return uuid, Error.New("invalid string") + } + _, err = hex.Decode(uuid[4:6], []byte(s)[9:13]) + if err != nil { + return uuid, Error.New("invalid string") + } + _, err = hex.Decode(uuid[6:8], []byte(s)[14:18]) + if err != nil { + return uuid, Error.New("invalid string") + } + _, err = hex.Decode(uuid[8:10], []byte(s)[19:23]) + if err != nil { + return uuid, Error.New("invalid string") + } + _, err = hex.Decode(uuid[10:16], []byte(s)[24:36]) + if err != nil { + return uuid, Error.New("invalid string") + } + + return uuid, nil +} + +// MarshalText marshals UUID in `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` form. +func (uuid UUID) MarshalText() ([]byte, error) { + return []byte(uuid.String()), nil +} + +// UnmarshalText unmarshals UUID from `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`. +func (uuid *UUID) UnmarshalText(b []byte) error { + x, err := FromString(string(b)) + if err != nil { + return Error.Wrap(err) + } + *uuid = x + return nil +} + +// MarshalJSON marshals UUID in `"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"` form. +func (uuid UUID) MarshalJSON() ([]byte, error) { + return []byte(`"` + uuid.String() + `"`), nil +} + +// UnmarshalJSON unmarshals UUID from `"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"`. 
+func (uuid *UUID) UnmarshalJSON(b []byte) error { + if len(b) != 36+2 { + return Error.New("bytes have wrong length %d expected %d", len(b), 36+2) + } + if b[0] != '"' && b[len(b)-1] != '"' { + return Error.New("expected quotes around string") + } + + x, err := FromString(string(b[1 : len(b)-1])) + if err != nil { + return Error.Wrap(err) + } + *uuid = x + return nil +} diff --git a/vendor/storj.io/drpc/.gitignore b/vendor/storj.io/drpc/.gitignore new file mode 100644 index 000000000..c24adb9d0 --- /dev/null +++ b/vendor/storj.io/drpc/.gitignore @@ -0,0 +1,3 @@ +*.test +.vscode +vendor diff --git a/vendor/storj.io/drpc/.golangci.yml b/vendor/storj.io/drpc/.golangci.yml new file mode 100644 index 000000000..32d8f65fd --- /dev/null +++ b/vendor/storj.io/drpc/.golangci.yml @@ -0,0 +1,129 @@ +run: + deadline: 10m + issues-exit-code: 1 + tests: true + + skip-files: + - ".*\\.pb\\.go$" + - ".*\\.dbx\\.go$" + + +linters: + enable: + - govet # check standard vet rules + - golint # check standard linting rules + - staticcheck # comprehensive checks + - errcheck # find unchecked errors + - ineffassign # find ineffective assignments + - varcheck # find unused global variables and constants + - structcheck # check for unused struct parameters + - deadcode # find code that is not used + - bodyclose # find unclosed http response bodies + - nakedret # check for naked returns + - gofmt # sanity check formatting + - misspell # check spelling + - unconvert # remove unnecessary conversions + - scopelint # checks for unpinned variables + - gocritic # checks for style, performance issues, and common programming errors + - dogsled # checks for too many ignored arguments + - goprintffuncname # Checks that printf-like functions are named with `f` at the end [fast: true, auto-fix: false] + - gosec + #TODO#- whitespace # checks for leading/trailing newlines + #TODO#- unparam # check for unused parameters + #TODO#- maligned # check for better memory usage + #TODO#- prealloc # easy 
optimizations + disable: + - godox + - wsl # too much noise + - goimports # disabled, because it's slow, using scripts/check-imports.go instead. + - goconst # check for things that could be replaced by constants + - gocyclo # needs tweaking + - depguard # unused + - stylecheck # has false positives + - dupl # slow + - interfacer # not that useful + - gosimple # part of staticcheck + - unused # part of staticcheck + - lll + - rowserrcheck # checks if sql.Rows.Err is checked correctly - Disabled because it reports false positive with defer statements after Query call + fast: false + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + +linters-settings: + errcheck: + # report about not checking of errors in type assetions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + govet: + # report about shadowed variables + #TODO# check-shadowing: true + + # Obtain type information from installed (to $GOPATH/pkg) package files: + # golangci-lint will execute `go install -i` and `go test -i` for analyzed packages + # before analyzing them. + # Enable this option only if all conditions are met: + # 1. you use only "fast" linters (--fast e.g.): no program loading occurs + # 2. you use go >= 1.10 + # 3. you do repeated runs (false for CI) or cache $GOPATH/pkg or `go env GOCACHE` dir in CI. 
+ use-installed-packages: false + gocritic: + disabled-checks: + - ifElseChain + goimports: + local: "storj.io" + golint: + min-confidence: 0.8 + gofmt: + simplify: true + gocyclo: + min-complexity: 10 + maligned: + suggest-new: true + dupl: + threshold: 150 + goconst: + min-len: 3 + min-occurrences: 3 + misspell: + lll: + line-length: 140 + tab-width: 1 + unused: + # treat code as a program (not a library) and report unused exported identifiers; default is false. + # XXX: if you enable this setting, unused will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find funcs usages. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + unparam: + # call graph construction algorithm (cha, rta). In general, use cha for libraries, + # and rta for programs with main packages. Default is cha. + algo: cha + + # Inspect exported functions, default is false. Set to true if no external program/library imports your code. + # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: + # if it's called for subdir of a project it can't find external interfaces. All text editor integrations + # with golangci-lint call it on a directory with the changed file. + check-exported: false + nakedret: + # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 + max-func-lines: 30 + prealloc: + # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. + # True by default. 
+ simple: true + range-loops: true # Report preallocation suggestions on range loops, true by default + for-loops: false # Report preallocation suggestions on for loops, false by default + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + new: false + exclude-use-default: false diff --git a/vendor/storj.io/drpc/Dockerfile.jenkins b/vendor/storj.io/drpc/Dockerfile.jenkins new file mode 100644 index 000000000..bd4da981f --- /dev/null +++ b/vendor/storj.io/drpc/Dockerfile.jenkins @@ -0,0 +1,11 @@ +FROM golang:1.13.0 + +# Linters + +RUN GO111MODULE=on go get honnef.co/go/tools/cmd/staticcheck@2019.2.3 +RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b ${GOPATH}/bin v1.23.1 + +# Set our entrypoint to close after 28 minutes, and forcefully close at 30 minutes. +# This is to prevent Jenkins collecting cats. + +ENTRYPOINT ["timeout", "-k30m", "28m"] diff --git a/vendor/storj.io/drpc/Jenkinsfile b/vendor/storj.io/drpc/Jenkinsfile new file mode 100644 index 000000000..b69937da1 --- /dev/null +++ b/vendor/storj.io/drpc/Jenkinsfile @@ -0,0 +1,27 @@ +pipeline { + agent { + dockerfile { + filename 'Dockerfile.jenkins' + args '-u root:root --cap-add SYS_PTRACE -v "/tmp/gomod":/go/pkg/mod' + label 'main' + } + } + stages { + stage('Download') { + steps { + checkout scm + sh 'make download' + } + } + stage('Test') { + steps { + sh 'make test' + } + } + stage('Lint') { + steps { + sh 'make lint' + } + } + } +} diff --git a/vendor/storj.io/drpc/LICENSE b/vendor/storj.io/drpc/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/storj.io/drpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/storj.io/drpc/Makefile b/vendor/storj.io/drpc/Makefile new file mode 100644 index 000000000..3d21e2ce8 --- /dev/null +++ b/vendor/storj.io/drpc/Makefile @@ -0,0 +1,33 @@ +.DEFAULT_GOAL = check + +.PHONY: check +check: generate build lint test docs + +.PHONY: build +build: + ./scripts/run.sh go build ./... + +.PHONY: docs +docs: + ./scripts/docs.sh + +.PHONY: download +download: + ./scripts/run.sh go mod download + +.PHONY: generate +generate: + ./scripts/run.sh go generate ./... + +.PHONY: lint +lint: + ./scripts/run.sh staticcheck ./... + ./scripts/run.sh golangci-lint -j=2 run + +.PHONY: tidy +tidy: + ./scripts/run.sh go mod tidy + +.PHONY: test +test: + ./scripts/run.sh go test ./... 
-count=1 diff --git a/vendor/storj.io/drpc/README.md b/vendor/storj.io/drpc/README.md new file mode 100644 index 000000000..79d3a28a4 --- /dev/null +++ b/vendor/storj.io/drpc/README.md @@ -0,0 +1,134 @@ +# package drpc + +`import "storj.io/drpc"` + +Package drpc is a light replacement for gprc. + +## Usage + +```go +var ( + Error = errs.Class("drpc") + InternalError = errs.Class("internal error") + ProtocolError = errs.Class("protocol error") +) +``` +These error classes represent some common errors that drpc generates. + +#### type Conn + +```go +type Conn interface { + // Close closes the connection. + Close() error + + // Transport returns the transport the connection is using. + Transport() Transport + + // Invoke issues a unary rpc to the remote. Only one Invoke or Stream may be + // open at once. + Invoke(ctx context.Context, rpc string, in, out Message) error + + // NewStream starts a stream with the remote. Only one Invoke or Stream may be + // open at once. + NewStream(ctx context.Context, rpc string) (Stream, error) +} +``` + +Conn represents a client connection to a server. + +#### type Description + +```go +type Description interface { + // NumMethods returns the number of methods available. + NumMethods() int + + // Method returns the information about the nth method along with a handler + // to invoke it. The method interface that it returns is expected to be + // a method expression like `(*Type).HandlerName`. + Method(n int) (rpc string, receiver Receiver, method interface{}, ok bool) +} +``` + +Description is the interface implemented by things that can be registered by a +Server. + +#### type Handler + +```go +type Handler interface { + HandleRPC(stream Stream, rpc string) (err error) +} +``` + +Handler handles streams and rpcs dispatched to it by a Server. 
+ +#### type Message + +```go +type Message interface { + Reset() + String() string + ProtoMessage() +} +``` + +Message is a protobuf message, just here so protobuf isn't necessary to import +or be exposed in the types. + +#### type Mux + +```go +type Mux interface { + Register(srv interface{}, desc Description) error +} +``` + +Mux is a type that can have an implementation and a Description registered with +it. + +#### type Receiver + +```go +type Receiver = func(srv interface{}, ctx context.Context, in1, in2 interface{}) (out Message, err error) +``` + +Receiver is invoked by a server for a given rpc. + +#### type Stream + +```go +type Stream interface { + // Context returns the context associated with the stream. It is canceled + // when the Stream is closed and no more messages will ever be sent or + // received on it. + Context() context.Context + + // MsgSend sends the Message to the remote. + MsgSend(msg Message) error + + // MsgRecv receives a Message from the remote. + MsgRecv(msg Message) error + + // CloseSend signals to the remote that we will no longer send any messages. + CloseSend() error + + // Close closes the stream. + Close() error +} +``` + +Stream is a bi-directional stream of messages to some other party. + +#### type Transport + +```go +type Transport interface { + io.Reader + io.Writer + io.Closer +} +``` + +Transport is an interface describing what is required for a drpc connection. diff --git a/vendor/storj.io/drpc/doc.go b/vendor/storj.io/drpc/doc.go new file mode 100644 index 000000000..a551f92af --- /dev/null +++ b/vendor/storj.io/drpc/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpc is a light replacement for gprc. +package drpc diff --git a/vendor/storj.io/drpc/drpc.go b/vendor/storj.io/drpc/drpc.go new file mode 100644 index 000000000..d3e7ca81b --- /dev/null +++ b/vendor/storj.io/drpc/drpc.go @@ -0,0 +1,95 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package drpc + +import ( + "context" + "io" + + "github.com/zeebo/errs" +) + +// These error classes represent some common errors that drpc generates. +var ( + Error = errs.Class("drpc") + InternalError = errs.Class("internal error") + ProtocolError = errs.Class("protocol error") +) + +// Transport is an interface describing what is required for a drpc connection. +type Transport interface { + io.Reader + io.Writer + io.Closer +} + +// Message is a protobuf message, just here so protobuf isn't necessary to +// import or be exposed in the types. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Conn represents a client connection to a server. +type Conn interface { + // Close closes the connection. + Close() error + + // Transport returns the transport the connection is using. + Transport() Transport + + // Invoke issues a unary rpc to the remote. Only one Invoke or Stream may be + // open at once. + Invoke(ctx context.Context, rpc string, in, out Message) error + + // NewStream starts a stream with the remote. Only one Invoke or Stream may be + // open at once. + NewStream(ctx context.Context, rpc string) (Stream, error) +} + +// Stream is a bi-directional stream of messages to some other party. +type Stream interface { + // Context returns the context associated with the stream. It is canceled + // when the Stream is closed and no more messages will ever be sent or + // received on it. + Context() context.Context + + // MsgSend sends the Message to the remote. + MsgSend(msg Message) error + + // MsgRecv receives a Message from the remote. + MsgRecv(msg Message) error + + // CloseSend signals to the remote that we will no longer send any messages. + CloseSend() error + + // Close closes the stream. + Close() error +} + +// Receiver is invoked by a server for a given rpc. 
+type Receiver = func(srv interface{}, ctx context.Context, in1, in2 interface{}) (out Message, err error) + +// Description is the interface implemented by things that can be registered by +// a Server. +type Description interface { + // NumMethods returns the number of methods available. + NumMethods() int + + // Method returns the information about the nth method along with a handler + // to invoke it. The method interface that it returns is expected to be + // a method expression like `(*Type).HandlerName`. + Method(n int) (rpc string, receiver Receiver, method interface{}, ok bool) +} + +// Mux is a type that can have an implementation and a Description registered with it. +type Mux interface { + Register(srv interface{}, desc Description) error +} + +// Handler handles streams and rpcs dispatched to it by a Server. +type Handler interface { + HandleRPC(stream Stream, rpc string) (err error) +} diff --git a/vendor/storj.io/drpc/drpcconn/README.md b/vendor/storj.io/drpc/drpcconn/README.md new file mode 100644 index 000000000..fb6523e97 --- /dev/null +++ b/vendor/storj.io/drpc/drpcconn/README.md @@ -0,0 +1,79 @@ +# package drpcconn + +`import "storj.io/drpc/drpcconn"` + +Package drpcconn creates a drpc client connection from a transport. + +## Usage + +#### type Conn + +```go +type Conn struct { +} +``` + +Conn is a drpc client connection. + +#### func New + +```go +func New(tr drpc.Transport) *Conn +``` +New returns a conn that uses the transport for reads and writes. + +#### func NewWithOptions + +```go +func NewWithOptions(tr drpc.Transport, opts Options) *Conn +``` +NewWithOptions returns a conn that uses the transport for reads and writes. The +Options control details of how the conn operates. + +#### func (*Conn) Close + +```go +func (c *Conn) Close() (err error) +``` +Close closes the connection. + +#### func (*Conn) Closed + +```go +func (c *Conn) Closed() bool +``` +Closed returns true if the connection is already closed. 
+ +#### func (*Conn) Invoke + +```go +func (c *Conn) Invoke(ctx context.Context, rpc string, in, out drpc.Message) (err error) +``` +Invoke issues the rpc on the transport serializing in, waits for a response, and +deserializes it into out. Only one Invoke or Stream may be open at a time. + +#### func (*Conn) NewStream + +```go +func (c *Conn) NewStream(ctx context.Context, rpc string) (_ drpc.Stream, err error) +``` +NewStream begins a streaming rpc on the connection. Only one Invoke or Stream +may be open at a time. + +#### func (*Conn) Transport + +```go +func (c *Conn) Transport() drpc.Transport +``` +Transport returns the transport the conn is using. + +#### type Options + +```go +type Options struct { + // Manager controls the options we pass to the manager of this conn. + Manager drpcmanager.Options +} +``` + +Options controls configuration settings for a conn. diff --git a/vendor/storj.io/drpc/drpcconn/conn.go b/vendor/storj.io/drpc/drpcconn/conn.go new file mode 100644 index 000000000..1c7c5df89 --- /dev/null +++ b/vendor/storj.io/drpc/drpcconn/conn.go @@ -0,0 +1,156 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcconn + +import ( + "context" + + "github.com/gogo/protobuf/proto" + "github.com/zeebo/errs" + + "storj.io/drpc" + "storj.io/drpc/drpcmanager" + "storj.io/drpc/drpcmetadata" + "storj.io/drpc/drpcstream" + "storj.io/drpc/drpcwire" +) + +// Options controls configuration settings for a conn. +type Options struct { + // Manager controls the options we pass to the manager of this conn. + Manager drpcmanager.Options +} + +// Conn is a drpc client connection. +type Conn struct { + tr drpc.Transport + man *drpcmanager.Manager +} + +var _ drpc.Conn = (*Conn)(nil) + +// New returns a conn that uses the transport for reads and writes. +func New(tr drpc.Transport) *Conn { + return NewWithOptions(tr, Options{}) +} + +// NewWithOptions returns a conn that uses the transport for reads and writes. 
+// The Options control details of how the conn operates. +func NewWithOptions(tr drpc.Transport, opts Options) *Conn { + return &Conn{ + tr: tr, + man: drpcmanager.NewWithOptions(tr, opts.Manager), + } +} + +// Transport returns the transport the conn is using. +func (c *Conn) Transport() drpc.Transport { + return c.tr +} + +// Closed returns true if the connection is already closed. +func (c *Conn) Closed() bool { + return c.man.Closed() +} + +// Close closes the connection. +func (c *Conn) Close() (err error) { + return c.man.Close() +} + +// Invoke issues the rpc on the transport serializing in, waits for a response, and +// deserializes it into out. Only one Invoke or Stream may be open at a time. +func (c *Conn) Invoke(ctx context.Context, rpc string, in, out drpc.Message) (err error) { + defer mon.Task()(&ctx)(&err) + defer mon.TaskNamed("invoke" + rpc)(&ctx)(&err) + mon.Event("outgoing_requests") + mon.Event("outgoing_invokes") + + var metadata []byte + if md, ok := drpcmetadata.Get(ctx); ok { + metadata, err = drpcmetadata.Encode(metadata, md) + if err != nil { + return err + } + } + + data, err := proto.Marshal(in) + if err != nil { + return errs.Wrap(err) + } + + stream, err := c.man.NewClientStream(ctx) + if err != nil { + return err + } + defer func() { err = errs.Combine(err, stream.Close()) }() + + if err := c.doInvoke(stream, []byte(rpc), data, metadata, out); err != nil { + return err + } + return nil +} + +func (c *Conn) doInvoke(stream *drpcstream.Stream, rpc, data []byte, metadata []byte, out drpc.Message) (err error) { + if len(metadata) > 0 { + if err := stream.RawWrite(drpcwire.KindInvokeMetadata, metadata); err != nil { + return err + } + } + if err := stream.RawWrite(drpcwire.KindInvoke, rpc); err != nil { + return err + } + if err := stream.RawWrite(drpcwire.KindMessage, data); err != nil { + return err + } + if err := stream.CloseSend(); err != nil { + return err + } + if err := stream.MsgRecv(out); err != nil { + return err + } + return 
nil +} + +// NewStream begins a streaming rpc on the connection. Only one Invoke or Stream may +// be open at a time. +func (c *Conn) NewStream(ctx context.Context, rpc string) (_ drpc.Stream, err error) { + defer mon.Task()(&ctx)(&err) + defer mon.TaskNamed("stream" + rpc)(&ctx)(&err) + mon.Event("outgoing_requests") + mon.Event("outgoing_streams") + + var metadata []byte + if md, ok := drpcmetadata.Get(ctx); ok { + metadata, err = drpcmetadata.Encode(metadata, md) + if err != nil { + return nil, err + } + } + + stream, err := c.man.NewClientStream(ctx) + if err != nil { + return nil, err + } + + if err := c.doNewStream(stream, []byte(rpc), metadata); err != nil { + return nil, errs.Combine(err, stream.Close()) + } + return stream, nil +} + +func (c *Conn) doNewStream(stream *drpcstream.Stream, rpc []byte, metadata []byte) error { + if len(metadata) > 0 { + if err := stream.RawWrite(drpcwire.KindInvokeMetadata, metadata); err != nil { + return err + } + } + if err := stream.RawWrite(drpcwire.KindInvoke, rpc); err != nil { + return err + } + if err := stream.RawFlush(); err != nil { + return err + } + return nil +} diff --git a/vendor/storj.io/drpc/drpcconn/doc.go b/vendor/storj.io/drpc/drpcconn/doc.go new file mode 100644 index 000000000..e2a50817e --- /dev/null +++ b/vendor/storj.io/drpc/drpcconn/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcconn creates a drpc client connection from a transport. +package drpcconn + +import "github.com/spacemonkeygo/monkit/v3" + +var mon = monkit.Package() diff --git a/vendor/storj.io/drpc/drpcctx/README.md b/vendor/storj.io/drpc/drpcctx/README.md new file mode 100644 index 000000000..38f6e870a --- /dev/null +++ b/vendor/storj.io/drpc/drpcctx/README.md @@ -0,0 +1,60 @@ +# package drpcctx + +`import "storj.io/drpc/drpcctx"` + +Package drpcctx has helpers to interact with context.Context. 
+ +## Usage + +#### func Transport + +```go +func Transport(ctx context.Context) (drpc.Transport, bool) +``` +Transport returns the drpc.Transport associated with the context and a bool if +it existed. + +#### func WithTransport + +```go +func WithTransport(ctx context.Context, tr drpc.Transport) context.Context +``` +WithTransport associates the drpc.Transport as a value on the context. + +#### type Tracker + +```go +type Tracker struct { + context.Context +} +``` + +Tracker keeps track of launched goroutines with a context. + +#### func NewTracker + +```go +func NewTracker(ctx context.Context) *Tracker +``` +NewTracker creates a Tracker bound to the provided context. + +#### func (*Tracker) Cancel + +```go +func (t *Tracker) Cancel() +``` +Cancel cancels the tracker's context. + +#### func (*Tracker) Run + +```go +func (t *Tracker) Run(cb func(ctx context.Context)) +``` +Run starts a goroutine running the callback with the tracker as the context. + +#### func (*Tracker) Wait + +```go +func (t *Tracker) Wait() +``` +Wait blocks until all callbacks started with Run have exited. diff --git a/vendor/storj.io/drpc/drpcctx/doc.go b/vendor/storj.io/drpc/drpcctx/doc.go new file mode 100644 index 000000000..b098de782 --- /dev/null +++ b/vendor/storj.io/drpc/drpcctx/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcctx has helpers to interact with context.Context. +package drpcctx diff --git a/vendor/storj.io/drpc/drpcctx/transport.go b/vendor/storj.io/drpc/drpcctx/transport.go new file mode 100644 index 000000000..39b86a178 --- /dev/null +++ b/vendor/storj.io/drpc/drpcctx/transport.go @@ -0,0 +1,59 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcctx + +import ( + "context" + "sync" + + "storj.io/drpc" +) + +type transportKey struct{} + +// WithTransport associates the drpc.Transport as a value on the context. 
+func WithTransport(ctx context.Context, tr drpc.Transport) context.Context { + return context.WithValue(ctx, transportKey{}, tr) +} + +// Transport returns the drpc.Transport associated with the context and a bool if it +// existed. +func Transport(ctx context.Context) (drpc.Transport, bool) { + tr, ok := ctx.Value(transportKey{}).(drpc.Transport) + return tr, ok +} + +// Tracker keeps track of launched goroutines with a context. +type Tracker struct { + context.Context + cancel func() + wg sync.WaitGroup +} + +// NewTracker creates a Tracker bound to the provided context. +func NewTracker(ctx context.Context) *Tracker { + ctx, cancel := context.WithCancel(ctx) + return &Tracker{ + Context: ctx, + cancel: cancel, + } +} + +// Run starts a goroutine running the callback with the tracker as the context. +func (t *Tracker) Run(cb func(ctx context.Context)) { + t.wg.Add(1) + go t.track(cb) +} + +// track is a helper to call done on the waitgroup after the callback returns. +func (t *Tracker) track(cb func(ctx context.Context)) { + cb(t) + t.wg.Done() +} + +// Cancel cancels the tracker's context. +func (t *Tracker) Cancel() { t.cancel() } + +// Wait blocks until all callbacks started with Run have exited. +func (t *Tracker) Wait() { t.wg.Wait() } diff --git a/vendor/storj.io/drpc/drpcdebug/README.md b/vendor/storj.io/drpc/drpcdebug/README.md new file mode 100644 index 000000000..cd833b27c --- /dev/null +++ b/vendor/storj.io/drpc/drpcdebug/README.md @@ -0,0 +1,14 @@ +# package drpcdebug + +`import "storj.io/drpc/drpcdebug"` + +Package drpcdebug provides helpers for debugging. + +## Usage + +#### func Log + +```go +func Log(cb func() string) +``` +Log executes the callback for a string to log if built with the debug tag. diff --git a/vendor/storj.io/drpc/drpcdebug/doc.go b/vendor/storj.io/drpc/drpcdebug/doc.go new file mode 100644 index 000000000..0f90ff296 --- /dev/null +++ b/vendor/storj.io/drpc/drpcdebug/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. 
+// See LICENSE for copying information. + +// Package drpcdebug provides helpers for debugging. +package drpcdebug diff --git a/vendor/storj.io/drpc/drpcdebug/log_disabled.go b/vendor/storj.io/drpc/drpcdebug/log_disabled.go new file mode 100644 index 000000000..4a87b4b6d --- /dev/null +++ b/vendor/storj.io/drpc/drpcdebug/log_disabled.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build !debug + +package drpcdebug + +// Log executes the callback for a string to log if built with the debug tag. +func Log(cb func() string) {} diff --git a/vendor/storj.io/drpc/drpcdebug/log_enabled.go b/vendor/storj.io/drpc/drpcdebug/log_enabled.go new file mode 100644 index 000000000..a3de6d06f --- /dev/null +++ b/vendor/storj.io/drpc/drpcdebug/log_enabled.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// +build debug + +package drpcdebug + +import ( + "log" + "os" +) + +var logger = log.New(os.Stderr, "", 0) + +// Log executes the callback for a string to log if built with the debug tag. +func Log(cb func() string) { + logger.Output(2, "\t"+cb()) +} diff --git a/vendor/storj.io/drpc/drpcerr/README.md b/vendor/storj.io/drpc/drpcerr/README.md new file mode 100644 index 000000000..141326d35 --- /dev/null +++ b/vendor/storj.io/drpc/drpcerr/README.md @@ -0,0 +1,22 @@ +# package drpcerr + +`import "storj.io/drpc/drpcerr"` + +Package drpcerr lets one associate error codes with errors. + +## Usage + +#### func Code + +```go +func Code(err error) uint64 +``` +Code returns the error code associated with the error or 0 if none is. + +#### func WithCode + +```go +func WithCode(err error, code uint64) error +``` +WithCode associates the code with the error if it is non nil and the code is +non-zero. 
diff --git a/vendor/storj.io/drpc/drpcerr/doc.go b/vendor/storj.io/drpc/drpcerr/doc.go new file mode 100644 index 000000000..53ff88e16 --- /dev/null +++ b/vendor/storj.io/drpc/drpcerr/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcerr lets one associate error codes with errors. +package drpcerr diff --git a/vendor/storj.io/drpc/drpcerr/err.go b/vendor/storj.io/drpc/drpcerr/err.go new file mode 100644 index 000000000..f0383bfe6 --- /dev/null +++ b/vendor/storj.io/drpc/drpcerr/err.go @@ -0,0 +1,40 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcerr + +// Code returns the error code associated with the error or 0 if none is. +func Code(err error) uint64 { + for i := 0; i < 100; i++ { + switch v := err.(type) { + case interface{ Code() uint64 }: + return v.Code() + case interface{ Cause() error }: + err = v.Cause() + case interface{ Unwrap() error }: + err = v.Unwrap() + case nil: + return 0 + } + } + return 0 +} + +// WithCode associates the code with the error if it is non nil and the code +// is non-zero. +func WithCode(err error, code uint64) error { + if err == nil || code == 0 { + return err + } + return &codeErr{err: err, code: code} +} + +type codeErr struct { + err error + code uint64 +} + +func (c *codeErr) Error() string { return c.err.Error() } +func (c *codeErr) Unwrap() error { return c.err } +func (c *codeErr) Cause() error { return c.err } +func (c *codeErr) Code() uint64 { return c.code } diff --git a/vendor/storj.io/drpc/drpcmanager/README.md b/vendor/storj.io/drpc/drpcmanager/README.md new file mode 100644 index 000000000..e89564678 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmanager/README.md @@ -0,0 +1,79 @@ +# package drpcmanager + +`import "storj.io/drpc/drpcmanager"` + +Package drpcmanager reads packets from a transport to make streams. 
+ +## Usage + +#### type Manager + +```go +type Manager struct { +} +``` + +Manager handles the logic of managing a transport for a drpc client or server. +It ensures that the connection is always being read from, that it is closed in +the case that the manager is and forwarding drpc protocol messages to the +appropriate stream. + +#### func New + +```go +func New(tr drpc.Transport) *Manager +``` +New returns a new Manager for the transport. + +#### func NewWithOptions + +```go +func NewWithOptions(tr drpc.Transport, opts Options) *Manager +``` +NewWithOptions returns a new manager for the transport. It uses the provided +options to manage details of how it uses it. + +#### func (*Manager) Close + +```go +func (m *Manager) Close() error +``` +Close closes the transport the manager is using. + +#### func (*Manager) Closed + +```go +func (m *Manager) Closed() bool +``` +Closed returns if the manager has been closed. + +#### func (*Manager) NewClientStream + +```go +func (m *Manager) NewClientStream(ctx context.Context) (stream *drpcstream.Stream, err error) +``` +NewClientStream starts a stream on the managed transport for use by a client. + +#### func (*Manager) NewServerStream + +```go +func (m *Manager) NewServerStream(ctx context.Context) (stream *drpcstream.Stream, rpc string, err error) +``` +NewServerStream starts a stream on the managed transport for use by a server. It +does this by waiting for the client to issue an invoke message and returning the +details. + +#### type Options + +```go +type Options struct { + // WriterBufferSize controls the size of the buffer that we will fill before + // flushing. Normal writes to streams typically issue a flush explicitly. + WriterBufferSize int + + // Stream are passed to any streams the manager creates. + Stream drpcstream.Options +} +``` + +Options controls configuration settings for a manager. 
diff --git a/vendor/storj.io/drpc/drpcmanager/doc.go b/vendor/storj.io/drpc/drpcmanager/doc.go new file mode 100644 index 000000000..8cc5aa48b --- /dev/null +++ b/vendor/storj.io/drpc/drpcmanager/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcmanager reads packets from a transport to make streams. +package drpcmanager + +import "github.com/spacemonkeygo/monkit/v3" + +var mon = monkit.Package() diff --git a/vendor/storj.io/drpc/drpcmanager/manager.go b/vendor/storj.io/drpc/drpcmanager/manager.go new file mode 100644 index 000000000..9aecb72fa --- /dev/null +++ b/vendor/storj.io/drpc/drpcmanager/manager.go @@ -0,0 +1,335 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcmanager + +import ( + "context" + "fmt" + "sync" + + "github.com/zeebo/errs" + + "storj.io/drpc" + "storj.io/drpc/drpcctx" + "storj.io/drpc/drpcdebug" + "storj.io/drpc/drpcmetadata" + "storj.io/drpc/drpcsignal" + "storj.io/drpc/drpcstream" + "storj.io/drpc/drpcwire" +) + +var managerClosed = errs.New("manager closed") + +// Options controls configuration settings for a manager. +type Options struct { + // WriterBufferSize controls the size of the buffer that we will fill before + // flushing. Normal writes to streams typically issue a flush explicitly. + WriterBufferSize int + + // Stream are passed to any streams the manager creates. + Stream drpcstream.Options +} + +// Manager handles the logic of managing a transport for a drpc client or server. +// It ensures that the connection is always being read from, that it is closed +// in the case that the manager is and forwarding drpc protocol messages to the +// appropriate stream. 
+type Manager struct { + tr drpc.Transport + wr *drpcwire.Writer + rd *drpcwire.Reader + opts Options + + once sync.Once + + sid uint64 + sem chan struct{} + term drpcsignal.Signal // set when the manager should start terminating + read drpcsignal.Signal // set after the goroutine reading from the transport is done + tport drpcsignal.Signal // set after the transport has been closed + queue chan drpcwire.Packet + ctx context.Context +} + +// New returns a new Manager for the transport. +func New(tr drpc.Transport) *Manager { + return NewWithOptions(tr, Options{}) +} + +// NewWithOptions returns a new manager for the transport. It uses the provided +// options to manage details of how it uses it. +func NewWithOptions(tr drpc.Transport, opts Options) *Manager { + m := &Manager{ + tr: tr, + wr: drpcwire.NewWriter(tr, opts.WriterBufferSize), + rd: drpcwire.NewReader(tr), + opts: opts, + + // this semaphore controls the number of concurrent streams. it MUST be 1. + sem: make(chan struct{}, 1), + queue: make(chan drpcwire.Packet), + ctx: drpcctx.WithTransport(context.Background(), tr), + } + + go m.manageTransport() + go m.manageReader() + + return m +} + +// +// helpers +// + +// poll checks if a channel is immediately ready. +func poll(ch <-chan struct{}) bool { + select { + case <-ch: + return true + default: + return false + } +} + +// poll checks if the context is canceled or the manager is terminated. +func (m *Manager) poll(ctx context.Context) error { + switch { + case poll(ctx.Done()): + return ctx.Err() + + case poll(m.term.Signal()): + return m.term.Err() + + default: + return nil + } +} + +// acquireSemaphore attempts to acquire the semaphore protecting streams. If the +// context is canceled or the manager is terminated, it returns an error. 
+func (m *Manager) acquireSemaphore(ctx context.Context) error { + if err := m.poll(ctx); err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + + case <-m.term.Signal(): + return m.term.Err() + + case m.sem <- struct{}{}: + return nil + } +} + +// +// exported interface +// + +// Closed returns if the manager has been closed. +func (m *Manager) Closed() bool { + return m.term.IsSet() +} + +// Close closes the transport the manager is using. +func (m *Manager) Close() error { + // when closing, we set the manager terminated signal, wait for the goroutine + // managing the transport to notice and close it, acquire the semaphore to ensure + // there are streams running, then wait for the goroutine reading packets to be done. + // we protect it with a once to ensure both that we only do this once, and that + // concurrent calls are sure that it has fully executed. + + m.once.Do(func() { + m.term.Set(managerClosed) + <-m.tport.Signal() + m.sem <- struct{}{} + <-m.read.Signal() + }) + + return m.tport.Err() +} + +// NewClientStream starts a stream on the managed transport for use by a client. +func (m *Manager) NewClientStream(ctx context.Context) (stream *drpcstream.Stream, err error) { + if err := m.acquireSemaphore(ctx); err != nil { + return nil, err + } + + m.sid++ + stream = drpcstream.NewWithOptions(m.ctx, m.sid, m.wr, m.opts.Stream) + go m.manageStream(ctx, stream) + return stream, nil +} + +// NewServerStream starts a stream on the managed transport for use by a server. It does +// this by waiting for the client to issue an invoke message and returning the details. 
+func (m *Manager) NewServerStream(ctx context.Context) (stream *drpcstream.Stream, rpc string, err error) { + if err := m.acquireSemaphore(ctx); err != nil { + return nil, "", err + } + + var metadata drpcwire.Packet + + for { + select { + case <-ctx.Done(): + <-m.sem + return nil, "", ctx.Err() + + case <-m.term.Signal(): + <-m.sem + return nil, "", m.term.Err() + + case pkt := <-m.queue: + switch pkt.Kind { + case drpcwire.KindInvokeMetadata: + // keep track of any metadata being sent before an invoke so that we can + // include it if the stream id matches the eventual invoke. + metadata = pkt + continue + + case drpcwire.KindInvoke: + streamCtx := m.ctx + if metadata.ID.Stream == pkt.ID.Stream { + md, err := drpcmetadata.Decode(metadata.Data) + if err != nil { + return nil, "", err + } + streamCtx = drpcmetadata.AddPairs(streamCtx, md) + } + + stream = drpcstream.NewWithOptions(streamCtx, pkt.ID.Stream, m.wr, m.opts.Stream) + go m.manageStream(ctx, stream) + return stream, string(pkt.Data), nil + default: + // we ignore packets that arent invokes because perhaps older streams have + // messages in the queue sent concurrently with our notification to them + // that the stream they were sent for is done. + continue + } + } + } +} + +// +// manage transport +// + +// manageTransport ensures that if the manager's term signal is ever set, then +// the underlying transport is closed and the error is recorded. +func (m *Manager) manageTransport() { + defer mon.Task()(nil)(nil) + <-m.term.Signal() + m.tport.Set(m.tr.Close()) +} + +// +// manage reader +// + +// manageReader is always reading a packet and sending it into the queue of packets +// the manager has. It sets the read signal when it exits so that one can wait to +// ensure that no one is reading on the reader. It sets the term signal if there is +// any error reading packets. 
+func (m *Manager) manageReader() { + defer mon.Task()(nil)(nil) + defer m.read.Set(managerClosed) + + for { + pkt, err := m.rd.ReadPacket() + if err != nil { + m.term.Set(errs.Wrap(err)) + return + } + + drpcdebug.Log(func() string { return fmt.Sprintf("MAN[%p]: %v", m, pkt) }) + + select { + case <-m.term.Signal(): + return + + case m.queue <- pkt: + } + } +} + +// +// manage stream +// + +// manageStream watches the context and the stream and returns when the stream is +// finished, canceling the stream if the context is canceled. +func (m *Manager) manageStream(ctx context.Context, stream *drpcstream.Stream) { + defer mon.Task()(nil)(nil) + + // create a wait group, launch the workers, and wait for them + wg := new(sync.WaitGroup) + wg.Add(2) + go m.manageStreamPackets(wg, stream) + go m.manageStreamContext(ctx, wg, stream) + wg.Wait() + + // always ensure the stream is terminated if we're done managing it. the + // stream should already be in a terminal state unless we're exiting due + // to the manager terminating. that only happens if the underlying transport + // died, so just assume the remote end issued a cancel by terminating + // the transport. + stream.Cancel(context.Canceled) + + // release semaphore + <-m.sem +} + +// manageStreamPackets repeatedly reads from the queue of packets and asks the stream to +// handle them. If there is an error handling a packet, that is considered to +// be fatal to the manager, so we set term. HandlePacket also returns a bool to +// indicate that the stream requires no more packets, and so manageStream can +// just exit. It releases the semaphore whenever it exits. 
+func (m *Manager) manageStreamPackets(wg *sync.WaitGroup, stream *drpcstream.Stream) { + defer mon.Task()(nil)(nil) + defer wg.Done() + + for { + select { + case <-m.term.Signal(): + return + + case <-stream.Terminated(): + return + + case pkt := <-m.queue: + drpcdebug.Log(func() string { return fmt.Sprintf("FWD[%p][%p]: %v", m, stream, pkt) }) + + ok, err := stream.HandlePacket(pkt) + if err != nil { + m.term.Set(errs.Wrap(err)) + return + } else if !ok { + return + } + } + } +} + +// manageStreamContext ensures that if the stream context is canceled, we inform the stream and +// possibly abort the underlying transport if the stream isn't finished. +func (m *Manager) manageStreamContext(ctx context.Context, wg *sync.WaitGroup, stream *drpcstream.Stream) { + defer mon.Task()(nil)(nil) + defer wg.Done() + + select { + case <-m.term.Signal(): + return + + case <-stream.Terminated(): + return + + case <-ctx.Done(): + stream.Cancel(ctx.Err()) + if !stream.Finished() { + m.term.Set(ctx.Err()) + } + } +} diff --git a/vendor/storj.io/drpc/drpcmetadata/README.md b/vendor/storj.io/drpc/drpcmetadata/README.md new file mode 100644 index 000000000..70cfcefff --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/README.md @@ -0,0 +1,37 @@ +# package drpcmetadata + +`import "storj.io/drpc/drpcmetadata"` + +Package drpcmetadata define the structure of the metadata supported by drpc +library. + +## Usage + +#### func Add + +```go +func Add(ctx context.Context, key, value string) context.Context +``` +Add associates a key/value pair on the context. + +#### func AddPairs + +```go +func AddPairs(ctx context.Context, md map[string]string) context.Context +``` +AddPairs attaches metadata onto a context and return the context. + +#### func Decode + +```go +func Decode(data []byte) (*invoke.InvokeMetadata, error) +``` +Decode translate byte form of metadata into metadata struct defined by protobuf. 
+ +#### func Encode + +```go +func Encode(buffer []byte) ([]byte, error) +``` +Encode generates byte form of the metadata and appends it onto the passed in +buffer. diff --git a/vendor/storj.io/drpc/drpcmetadata/doc.go b/vendor/storj.io/drpc/drpcmetadata/doc.go new file mode 100644 index 000000000..61f9a219e --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcmetadata define the structure of the metadata supported by drpc library. +package drpcmetadata diff --git a/vendor/storj.io/drpc/drpcmetadata/invoke/README.md b/vendor/storj.io/drpc/drpcmetadata/invoke/README.md new file mode 100644 index 000000000..e416ee5d3 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/invoke/README.md @@ -0,0 +1,80 @@ +# package invoke + +`import "storj.io/drpc/drpcmetadata/invoke"` + +Package invoke defines the proto messages exposed by drpc for sending metadata +across the wire. + +## Usage + +#### type InvokeMetadata + +```go +type InvokeMetadata struct { + Data map[string]string `protobuf:"bytes,2,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} +``` + + +#### func (*InvokeMetadata) Descriptor + +```go +func (*InvokeMetadata) Descriptor() ([]byte, []int) +``` + +#### func (*InvokeMetadata) GetData + +```go +func (m *InvokeMetadata) GetData() map[string]string +``` + +#### func (*InvokeMetadata) ProtoMessage + +```go +func (*InvokeMetadata) ProtoMessage() +``` + +#### func (*InvokeMetadata) Reset + +```go +func (m *InvokeMetadata) Reset() +``` + +#### func (*InvokeMetadata) String + +```go +func (m *InvokeMetadata) String() string +``` + +#### func (*InvokeMetadata) XXX_DiscardUnknown + +```go +func (m *InvokeMetadata) XXX_DiscardUnknown() +``` + +#### func 
(*InvokeMetadata) XXX_Marshal + +```go +func (m *InvokeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +``` + +#### func (*InvokeMetadata) XXX_Merge + +```go +func (m *InvokeMetadata) XXX_Merge(src proto.Message) +``` + +#### func (*InvokeMetadata) XXX_Size + +```go +func (m *InvokeMetadata) XXX_Size() int +``` + +#### func (*InvokeMetadata) XXX_Unmarshal + +```go +func (m *InvokeMetadata) XXX_Unmarshal(b []byte) error +``` diff --git a/vendor/storj.io/drpc/drpcmetadata/invoke/doc.go b/vendor/storj.io/drpc/drpcmetadata/invoke/doc.go new file mode 100644 index 000000000..183dafc44 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/invoke/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package invoke defines the proto messages exposed by drpc for +// sending metadata across the wire. +package invoke + +//go:generate bash -c "go install storj.io/drpc/cmd/protoc-gen-drpc && protoc --drpc_out=plugins=drpc:. metadata.proto" diff --git a/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.pb.go b/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.pb.go new file mode 100644 index 000000000..df224b567 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: metadata.proto + +package invoke + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Metadata struct { + Data map[string]string `protobuf:"bytes,1,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metadata) Reset() { *m = Metadata{} } +func (m *Metadata) String() string { return proto.CompactTextString(m) } +func (*Metadata) ProtoMessage() {} +func (*Metadata) Descriptor() ([]byte, []int) { + return fileDescriptor_56d9f74966f40d04, []int{0} +} +func (m *Metadata) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metadata.Unmarshal(m, b) +} +func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) +} +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) +} +func (m *Metadata) XXX_Size() int { + return xxx_messageInfo_Metadata.Size(m) +} +func (m *Metadata) XXX_DiscardUnknown() { + xxx_messageInfo_Metadata.DiscardUnknown(m) +} + +var xxx_messageInfo_Metadata proto.InternalMessageInfo + +func (m *Metadata) GetData() map[string]string { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*Metadata)(nil), "metadata.Metadata") + proto.RegisterMapType((map[string]string)(nil), "metadata.Metadata.DataEntry") +} + +func init() { proto.RegisterFile("metadata.proto", fileDescriptor_56d9f74966f40d04) } + +var fileDescriptor_56d9f74966f40d04 = []byte{ + // 138 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcb, 0x4d, 0x2d, 0x49, + 0x4c, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf1, 0x95, 0x4a, + 0xb9, 0x38, 0x7c, 0xa1, 0x6c, 0x21, 0x03, 0x2e, 0x16, 0x10, 0x2d, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, + 0x6d, 0x24, 
0xa3, 0x07, 0xd7, 0x04, 0x53, 0xa1, 0xe7, 0x92, 0x58, 0x92, 0xe8, 0x9a, 0x57, 0x52, + 0x54, 0x19, 0x04, 0x56, 0x29, 0x65, 0xce, 0xc5, 0x09, 0x17, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e, + 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, + 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15, 0x93, 0x05, 0xa3, 0x13, 0x47, 0x14, + 0x5b, 0x66, 0x5e, 0x59, 0x7e, 0x76, 0x6a, 0x12, 0x1b, 0xd8, 0x45, 0xc6, 0x80, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xc2, 0xda, 0x43, 0xa9, 0xa3, 0x00, 0x00, 0x00, +} diff --git a/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.proto b/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.proto new file mode 100644 index 000000000..0c73ad0f2 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/invoke/metadata.proto @@ -0,0 +1,11 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +syntax = "proto3"; +option go_package = "invoke"; + +package metadata; + +message Metadata { + map data = 1; +} diff --git a/vendor/storj.io/drpc/drpcmetadata/metadata.go b/vendor/storj.io/drpc/drpcmetadata/metadata.go new file mode 100644 index 000000000..d3e8a12e7 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmetadata/metadata.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcmetadata + +import ( + "context" + + "github.com/gogo/protobuf/proto" + + "storj.io/drpc/drpcmetadata/invoke" +) + +// AddPairs attaches metadata onto a context and return the context. +func AddPairs(ctx context.Context, md map[string]string) context.Context { + if len(md) < 1 { + return ctx + } + + for key, val := range md { + ctx = Add(ctx, key, val) + } + + return ctx +} + +// Encode generates byte form of the metadata and appends it onto the passed in buffer. 
+func Encode(buffer []byte, md map[string]string) ([]byte, error) { + if len(md) < 1 { + return buffer, nil + } + + msg := invoke.Metadata{ + Data: md, + } + + msgBytes, err := proto.Marshal(&msg) + if err != nil { + return buffer, err + } + + buffer = append(buffer, msgBytes...) + + return buffer, nil +} + +// Decode translate byte form of metadata into key/value metadata. +func Decode(data []byte) (map[string]string, error) { + if len(data) < 1 { + return map[string]string{}, nil + } + + msg := invoke.Metadata{} + err := proto.Unmarshal(data, &msg) + if err != nil { + return nil, err + } + + return msg.Data, nil +} + +type metadataKey struct{} + +// Add associates a key/value pair on the context. +func Add(ctx context.Context, key, value string) context.Context { + metadata, ok := Get(ctx) + if !ok { + metadata = make(map[string]string) + ctx = context.WithValue(ctx, metadataKey{}, metadata) + } + metadata[key] = value + return ctx +} + +// Get returns all key/value pairs on the given context. +func Get(ctx context.Context) (map[string]string, bool) { + metadata, ok := ctx.Value(metadataKey{}).(map[string]string) + return metadata, ok +} diff --git a/vendor/storj.io/drpc/drpcmux/README.md b/vendor/storj.io/drpc/drpcmux/README.md new file mode 100644 index 000000000..194965d07 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmux/README.md @@ -0,0 +1,39 @@ +# package drpcmux + +`import "storj.io/drpc/drpcmux"` + +Package drpcmux is a handler to dispatch rpcs to implementations. + +## Usage + +#### type Mux + +```go +type Mux struct { +} +``` + +Mux is an implementation of Handler to serve drpc connections to the appropriate +Receivers registered by Descriptions. + +#### func New + +```go +func New() *Mux +``` +New constructs a new Mux. + +#### func (*Mux) HandleRPC + +```go +func (m *Mux) HandleRPC(stream drpc.Stream, rpc string) (err error) +``` +HandleRPC handles the rpc that has been requested by the stream. 
+ +#### func (*Mux) Register + +```go +func (m *Mux) Register(srv interface{}, desc drpc.Description) error +``` +Register associates the rpcs described by the description in the server. It +returns an error if there was a problem registering it. diff --git a/vendor/storj.io/drpc/drpcmux/doc.go b/vendor/storj.io/drpc/drpcmux/doc.go new file mode 100644 index 000000000..89f79f464 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmux/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcmux is a handler to dispatch rpcs to implementations. +package drpcmux diff --git a/vendor/storj.io/drpc/drpcmux/mux.go b/vendor/storj.io/drpc/drpcmux/mux.go new file mode 100644 index 000000000..ca265fcb4 --- /dev/null +++ b/vendor/storj.io/drpc/drpcmux/mux.go @@ -0,0 +1,116 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcmux + +import ( + "reflect" + + "github.com/zeebo/errs" + + "storj.io/drpc" +) + +// Mux is an implementation of Handler to serve drpc connections to the +// appropriate Receivers registered by Descriptions. +type Mux struct { + rpcs map[string]rpcData +} + +// New constructs a new Mux. +func New() *Mux { + return &Mux{ + rpcs: make(map[string]rpcData), + } +} + +var ( + streamType = reflect.TypeOf((*drpc.Stream)(nil)).Elem() + messageType = reflect.TypeOf((*drpc.Message)(nil)).Elem() +) + +type rpcData struct { + srv interface{} + receiver drpc.Receiver + in1 reflect.Type + in2 reflect.Type +} + +// Register associates the rpcs described by the description in the server. +// It returns an error if there was a problem registering it. 
+func (m *Mux) Register(srv interface{}, desc drpc.Description) error { + n := desc.NumMethods() + for i := 0; i < n; i++ { + rpc, receiver, method, ok := desc.Method(i) + if !ok { + return errs.New("Description returned invalid method for index %d", i) + } + if err := m.registerOne(srv, rpc, receiver, method); err != nil { + return err + } + } + return nil +} + +// registerOne does the work to register a single rpc. +func (m *Mux) registerOne(srv interface{}, rpc string, receiver drpc.Receiver, method interface{}) error { + data := rpcData{srv: srv, receiver: receiver} + + switch mt := reflect.TypeOf(method); { + // unitary input, unitary output + case mt.NumOut() == 2: + data.in1 = mt.In(2) + if !data.in1.Implements(messageType) { + return errs.New("input argument not a drpc message: %v", data.in1) + } + + // unitary input, stream output + case mt.NumIn() == 3: + data.in1 = mt.In(1) + if !data.in1.Implements(messageType) { + return errs.New("input argument not a drpc message: %v", data.in1) + } + data.in2 = streamType + + // stream input + case mt.NumIn() == 2: + data.in1 = streamType + + // code gen bug? + default: + return errs.New("unknown method type: %v", mt) + } + + m.rpcs[rpc] = data + return nil +} + +// HandleRPC handles the rpc that has been requested by the stream. 
+func (m *Mux) HandleRPC(stream drpc.Stream, rpc string) (err error) { + data, ok := m.rpcs[rpc] + if !ok { + return drpc.ProtocolError.New("unknown rpc: %q", rpc) + } + + in := interface{}(stream) + if data.in1 != streamType { + msg, ok := reflect.New(data.in1.Elem()).Interface().(drpc.Message) + if !ok { + return drpc.InternalError.New("invalid rpc input type") + } + if err := stream.MsgRecv(msg); err != nil { + return errs.Wrap(err) + } + in = msg + } + + out, err := data.receiver(data.srv, stream.Context(), in, stream) + switch { + case err != nil: + return errs.Wrap(err) + case out != nil: + return stream.MsgSend(out) + default: + return nil + } +} diff --git a/vendor/storj.io/drpc/drpcsignal/README.md b/vendor/storj.io/drpc/drpcsignal/README.md new file mode 100644 index 000000000..ccaab1f84 --- /dev/null +++ b/vendor/storj.io/drpc/drpcsignal/README.md @@ -0,0 +1,56 @@ +# package drpcsignal + +`import "storj.io/drpc/drpcsignal"` + +Package drpcsignal holds a helper type to signal errors. + +## Usage + +#### type Signal + +```go +type Signal struct { +} +``` + +Signal contains an error value that can be set one and exports a number of ways +to inspect it. + +#### func (*Signal) Err + +```go +func (s *Signal) Err() error +``` +Err returns the error stored in the signal. Since one can store a nil error care +must be taken. A non-nil error returned from this method means that the Signal +has been set, but the inverse is not true. + +#### func (*Signal) Get + +```go +func (s *Signal) Get() (error, bool) +``` +Get returns the error set with the signal and a boolean indicating if the result +is valid. + +#### func (*Signal) IsSet + +```go +func (s *Signal) IsSet() bool +``` +IsSet returns true if the Signal is set. + +#### func (*Signal) Set + +```go +func (s *Signal) Set(err error) (ok bool) +``` +Set stores the error in the signal. It only keeps track of the first error set, +and returns true if it was the first error set. 
+ +#### func (*Signal) Signal + +```go +func (s *Signal) Signal() chan struct{} +``` +Signal returns a channel that will be closed when the signal is set. diff --git a/vendor/storj.io/drpc/drpcsignal/doc.go b/vendor/storj.io/drpc/drpcsignal/doc.go new file mode 100644 index 000000000..d03192e7a --- /dev/null +++ b/vendor/storj.io/drpc/drpcsignal/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcsignal holds a helper type to signal errors. +package drpcsignal diff --git a/vendor/storj.io/drpc/drpcsignal/signal.go b/vendor/storj.io/drpc/drpcsignal/signal.go new file mode 100644 index 000000000..a4aa1d570 --- /dev/null +++ b/vendor/storj.io/drpc/drpcsignal/signal.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcsignal + +import ( + "sync" + "sync/atomic" +) + +// Signal contains an error value that can be set one and exports +// a number of ways to inspect it. +type Signal struct { + set uint32 + on sync.Once + mu sync.Mutex + sig chan struct{} + err error +} + +func (s *Signal) init() { s.sig = make(chan struct{}) } + +// Signal returns a channel that will be closed when the signal is set. +func (s *Signal) Signal() chan struct{} { + s.on.Do(s.init) + return s.sig +} + +// Set stores the error in the signal. It only keeps track of the first +// error set, and returns true if it was the first error set. +func (s *Signal) Set(err error) (ok bool) { + if atomic.LoadUint32(&s.set) != 0 { + return false + } + return s.setSlow(err) +} + +// setSlow is the slow path for Set, so that the fast path is inlined into +// callers. +func (s *Signal) setSlow(err error) (ok bool) { + s.mu.Lock() + if s.set == 0 { + s.err = err + atomic.StoreUint32(&s.set, 1) + s.on.Do(s.init) + close(s.sig) + ok = true + } + s.mu.Unlock() + return ok +} + +// Get returns the error set with the signal and a boolean indicating if +// the result is valid. 
+func (s *Signal) Get() (error, bool) { //nolint + if atomic.LoadUint32(&s.set) != 0 { + return s.err, true + } + return nil, false +} + +// IsSet returns true if the Signal is set. +func (s *Signal) IsSet() bool { + return atomic.LoadUint32(&s.set) != 0 +} + +// Err returns the error stored in the signal. Since one can store a nil error +// care must be taken. A non-nil error returned from this method means that +// the Signal has been set, but the inverse is not true. +func (s *Signal) Err() error { + if atomic.LoadUint32(&s.set) != 0 { + return s.err + } + return nil +} diff --git a/vendor/storj.io/drpc/drpcstream/README.md b/vendor/storj.io/drpc/drpcstream/README.md new file mode 100644 index 000000000..aeafa7935 --- /dev/null +++ b/vendor/storj.io/drpc/drpcstream/README.md @@ -0,0 +1,147 @@ +# package drpcstream + +`import "storj.io/drpc/drpcstream"` + +Package drpcstream sends protobufs using the dprc wire protocol. + +## Usage + +#### type Options + +```go +type Options struct { + // SplitSize controls the default size we split packets into frames. + SplitSize int +} +``` + +Options controls configuration settings for a stream. + +#### type Stream + +```go +type Stream struct { +} +``` + +Stream represents an rpc actively happening on a transport. + +#### func New + +```go +func New(ctx context.Context, sid uint64, wr *drpcwire.Writer) *Stream +``` +New returns a new stream bound to the context with the given stream id and will +use the writer to write messages on. It is important use monotonically +increasing stream ids within a single transport. + +#### func NewWithOptions + +```go +func NewWithOptions(ctx context.Context, sid uint64, wr *drpcwire.Writer, opts Options) *Stream +``` +NewWithOptions returns a new stream bound to the context with the given stream +id and will use the writer to write messages on. It is important use +monotonically increasing stream ids within a single transport. 
The options are +used to control details of how the Stream operates. + +#### func (*Stream) Cancel + +```go +func (s *Stream) Cancel(err error) +``` +Cancel transitions the stream into a state where all writes to the transport +will return the provided error, and terminates the stream. It is a no-op if the +stream is already terminated. + +#### func (*Stream) Close + +```go +func (s *Stream) Close() (err error) +``` +Close terminates the stream and sends that the stream has been closed to the +remote. It is a no-op if the stream is already terminated. + +#### func (*Stream) CloseSend + +```go +func (s *Stream) CloseSend() (err error) +``` +CloseSend informs the remote that no more messages will be sent. If the remote +has also already issued a CloseSend, the stream is terminated. It is a no-op if +the stream already has sent a CloseSend or if it is terminated. + +#### func (*Stream) Context + +```go +func (s *Stream) Context() context.Context +``` +Context returns the context associated with the stream. It is closed when the +Stream will no longer issue any writes or reads. + +#### func (*Stream) Finished + +```go +func (s *Stream) Finished() bool +``` +Finished returns true if the stream is fully finished and will no longer issue +any writes or reads. + +#### func (*Stream) HandlePacket + +```go +func (s *Stream) HandlePacket(pkt drpcwire.Packet) (more bool, err error) +``` +HandlePacket advances the stream state machine by inspecting the packet. It +returns any major errors that should terminate the transport the stream is +operating on as well as a boolean indicating if the stream expects more packets. + +#### func (*Stream) MsgRecv + +```go +func (s *Stream) MsgRecv(msg drpc.Message) (err error) +``` +MsgRecv recives some protobuf data and unmarshals it into msg. + +#### func (*Stream) MsgSend + +```go +func (s *Stream) MsgSend(msg drpc.Message) (err error) +``` +MsgSend marshals the message with protobuf, writes it, and flushes. 
+ +#### func (*Stream) RawFlush + +```go +func (s *Stream) RawFlush() (err error) +``` +RawFlush flushes any buffers of data. + +#### func (*Stream) RawRecv + +```go +func (s *Stream) RawRecv() (data []byte, err error) +``` +RawRecv returns the raw bytes received for a message. + +#### func (*Stream) RawWrite + +```go +func (s *Stream) RawWrite(kind drpcwire.Kind, data []byte) (err error) +``` +RawWrite sends the data bytes with the given kind. + +#### func (*Stream) SendError + +```go +func (s *Stream) SendError(serr error) (err error) +``` +SendError terminates the stream and sends the error to the remote. It is a no-op +if the stream is already terminated. + +#### func (*Stream) Terminated + +```go +func (s *Stream) Terminated() <-chan struct{} +``` +Terminated returns a channel when the stream has been terminated. diff --git a/vendor/storj.io/drpc/drpcstream/chmutex.go b/vendor/storj.io/drpc/drpcstream/chmutex.go new file mode 100644 index 000000000..72a826fff --- /dev/null +++ b/vendor/storj.io/drpc/drpcstream/chmutex.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcstream + +import "sync" + +type chMutex struct { + ch chan struct{} + once sync.Once +} + +func (m *chMutex) init() { m.ch = make(chan struct{}, 1) } + +func (m *chMutex) Chan() chan struct{} { + m.once.Do(m.init) + return m.ch +} + +func (m *chMutex) Lock() { + m.once.Do(m.init) + m.ch <- struct{}{} +} + +func (m *chMutex) TryLock() bool { + m.once.Do(m.init) + select { + case m.ch <- struct{}{}: + return true + default: + return false + } +} + +func (m *chMutex) Unlock() { + m.once.Do(m.init) + <-m.ch +} diff --git a/vendor/storj.io/drpc/drpcstream/doc.go b/vendor/storj.io/drpc/drpcstream/doc.go new file mode 100644 index 000000000..e9e0e45d0 --- /dev/null +++ b/vendor/storj.io/drpc/drpcstream/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +// Package drpcstream sends protobufs using the dprc wire protocol. +package drpcstream + +import "github.com/spacemonkeygo/monkit/v3" + +var mon = monkit.Package() diff --git a/vendor/storj.io/drpc/drpcstream/stream.go b/vendor/storj.io/drpc/drpcstream/stream.go new file mode 100644 index 000000000..d97d66189 --- /dev/null +++ b/vendor/storj.io/drpc/drpcstream/stream.go @@ -0,0 +1,424 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcstream + +import ( + "context" + "fmt" + "io" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/zeebo/errs" + + "storj.io/drpc" + "storj.io/drpc/drpcdebug" + "storj.io/drpc/drpcsignal" + "storj.io/drpc/drpcwire" +) + +// Options controls configuration settings for a stream. +type Options struct { + // SplitSize controls the default size we split packets into frames. + SplitSize int +} + +// Stream represents an rpc actively happening on a transport. +type Stream struct { + ctx context.Context + cancel func() + opts Options + + writeMu chMutex + id drpcwire.ID + wr *drpcwire.Writer + + mu sync.Mutex // protects state transitions + sigs struct { + send drpcsignal.Signal // set when done sending messages + recv drpcsignal.Signal // set when done receiving messages + term drpcsignal.Signal // set when in terminated state + finish drpcsignal.Signal // set when all writes are complete + cancel drpcsignal.Signal // set when externally canceled and transport will be closed + } + queue chan drpcwire.Packet + + // avoids allocations of closures + pollWriteFn func(drpcwire.Frame) error +} + +var _ drpc.Stream = (*Stream)(nil) + +// New returns a new stream bound to the context with the given stream id and will +// use the writer to write messages on. It is important use monotonically increasing +// stream ids within a single transport. 
+func New(ctx context.Context, sid uint64, wr *drpcwire.Writer) *Stream { + return NewWithOptions(ctx, sid, wr, Options{}) +} + +// NewWithOptions returns a new stream bound to the context with the given stream id +// and will use the writer to write messages on. It is important use monotonically increasing +// stream ids within a single transport. The options are used to control details of how +// the Stream operates. +func NewWithOptions(ctx context.Context, sid uint64, wr *drpcwire.Writer, opts Options) *Stream { + ctx, cancel := context.WithCancel(ctx) + + s := &Stream{ + ctx: ctx, + cancel: cancel, + opts: opts, + + wr: wr, + + id: drpcwire.ID{Stream: sid}, + queue: make(chan drpcwire.Packet), + } + + s.pollWriteFn = s.pollWrite + + return s +} + +// +// monitoring helpers +// + +// monCtx returns a copy of the context for use with mon.Task so that there aren't +// races overwriting the stream's context. +func (s *Stream) monCtx() *context.Context { + ctx := s.ctx + return &ctx +} + +// +// accessors +// + +// Context returns the context associated with the stream. It is closed when +// the Stream will no longer issue any writes or reads. +func (s *Stream) Context() context.Context { return s.ctx } + +// Terminated returns a channel when the stream has been terminated. +func (s *Stream) Terminated() <-chan struct{} { return s.sigs.term.Signal() } + +// Finished returns true if the stream is fully finished and will no longer +// issue any writes or reads. +func (s *Stream) Finished() bool { return s.sigs.finish.IsSet() } + +// +// packet handler +// + +// HandlePacket advances the stream state machine by inspecting the packet. It returns +// any major errors that should terminate the transport the stream is operating on as +// well as a boolean indicating if the stream expects more packets. 
+func (s *Stream) HandlePacket(pkt drpcwire.Packet) (more bool, err error) { + defer mon.Task()(s.monCtx())(&err) + + s.mu.Lock() + defer s.mu.Unlock() + + drpcdebug.Log(func() string { return fmt.Sprintf("STR[%p][%d]: %v", s, s.id.Stream, pkt) }) + + if pkt.ID.Stream != s.id.Stream { + return true, nil + } + + switch pkt.Kind { + case drpcwire.KindInvoke: + err := drpc.ProtocolError.New("invoke on existing stream") + s.terminate(err) + return false, err + + case drpcwire.KindMessage: + if s.sigs.recv.IsSet() || s.sigs.term.IsSet() { + return true, nil + } + + // drop the mutex while we either send into the queue or we're told that + // receiving is done. we don't handle any more packets until the message + // is delivered, so the only way it can become set is from some of the + // stream terminating calls, in which case, shutting down the stream is + // racing with the message being received, so dropping it is valid. + s.mu.Unlock() + defer s.mu.Lock() + + select { + case <-s.sigs.recv.Signal(): + case <-s.sigs.term.Signal(): + case s.queue <- pkt: + } + + return true, nil + + case drpcwire.KindError: + err := drpcwire.UnmarshalError(pkt.Data) + s.sigs.send.Set(io.EOF) // in this state, gRPC returns io.EOF on send. + s.terminate(err) + return false, nil + + case drpcwire.KindClose: + s.sigs.recv.Set(io.EOF) + s.terminate(drpc.Error.New("remote closed the stream")) + return false, nil + + case drpcwire.KindCloseSend: + s.sigs.recv.Set(io.EOF) + s.terminateIfBothClosed() + return false, nil + + default: + err := drpc.InternalError.New("unknown packet kind: %s", pkt.Kind) + s.terminate(err) + return false, err + } +} + +// +// helpers +// + +// checkFinished checks to see if the stream is terminated, and if so, sets the finished +// flag. This must be called every time right before we release the write mutex. 
+func (s *Stream) checkFinished() { + if s.sigs.term.IsSet() { + s.sigs.finish.Set(nil) + } +} + +// checkCancelError will replace the error with one from the cancel signal if it is +// set. This is to prevent errors from reads/writes to a transport after it has been +// asynchronously closed due to context cancelation. +func (s *Stream) checkCancelError(err error) error { + if sigErr, ok := s.sigs.cancel.Get(); ok { + return sigErr + } + return err +} + +// newPackage bumps the internal message id and returns a packet. It must be called +// under a mutex. +func (s *Stream) newPacket(kind drpcwire.Kind, data []byte) drpcwire.Packet { + s.id.Message++ + return drpcwire.Packet{ + Data: data, + ID: s.id, + Kind: kind, + } +} + +// pollWrite checks for any conditions that should cause a write to not happen and +// then issues the write of the frame. +func (s *Stream) pollWrite(fr drpcwire.Frame) (err error) { + switch { + case s.sigs.send.IsSet(): + return s.sigs.send.Err() + case s.sigs.term.IsSet(): + return s.sigs.term.Err() + } + + return s.checkCancelError(errs.Wrap(s.wr.WriteFrame(fr))) +} + +// sendPacket sends the packet in a single write and flushes. It does not check for +// any conditions to stop it from writing and is meant for internal stream use to +// do things like signal errors or closes to the remote side. +func (s *Stream) sendPacket(kind drpcwire.Kind, data []byte) (err error) { + defer mon.Task()(s.monCtx())(&err) + + if err := s.wr.WritePacket(s.newPacket(kind, data)); err != nil { + return errs.Wrap(err) + } + if err := s.wr.Flush(); err != nil { + return errs.Wrap(err) + } + return nil +} + +// terminateIfBothClosed is a helper to terminate the stream if both sides have +// issued a CloseSend. +func (s *Stream) terminateIfBothClosed() { + if s.sigs.send.IsSet() && s.sigs.recv.IsSet() { + s.terminate(drpc.Error.New("stream terminated by both issuing close send")) + } +} + +// terminate marks the stream as terminated with the given error. 
It also marks +// the stream as finished if no writes are happening at the time of the call. +func (s *Stream) terminate(err error) { + s.sigs.send.Set(err) + s.sigs.recv.Set(err) + s.sigs.term.Set(err) + s.cancel() + + // if we can acquire the write mutex, then checkFinished. if not, then we know + // some other write is happening, and it will call checkFinished before it + // releases the mutex. + if s.writeMu.TryLock() { + s.checkFinished() + s.writeMu.Unlock() + } +} + +// +// raw read/write +// + +// RawWrite sends the data bytes with the given kind. +func (s *Stream) RawWrite(kind drpcwire.Kind, data []byte) (err error) { + defer mon.Task()(s.monCtx())(&err) + + s.writeMu.Lock() + defer s.writeMu.Unlock() + defer s.checkFinished() + + return drpcwire.SplitN(s.newPacket(kind, data), s.opts.SplitSize, s.pollWriteFn) +} + +// RawFlush flushes any buffers of data. +func (s *Stream) RawFlush() (err error) { + defer mon.Task()(s.monCtx())(&err) + + s.writeMu.Lock() + defer s.writeMu.Unlock() + defer s.checkFinished() + + return s.checkCancelError(errs.Wrap(s.wr.Flush())) +} + +// RawRecv returns the raw bytes received for a message. +func (s *Stream) RawRecv() (data []byte, err error) { + defer mon.Task()(s.monCtx())(&err) + + if s.sigs.recv.IsSet() { + return nil, s.sigs.recv.Err() + } + + select { + case <-s.sigs.recv.Signal(): + return nil, s.sigs.recv.Err() + case pkt := <-s.queue: + return pkt.Data, nil + } +} + +// +// msg read/write +// + +// MsgSend marshals the message with protobuf, writes it, and flushes. +func (s *Stream) MsgSend(msg drpc.Message) (err error) { + defer mon.Task()(s.monCtx())(&err) + + data, err := proto.Marshal(msg) + if err != nil { + return errs.Wrap(err) + } + if err := s.RawWrite(drpcwire.KindMessage, data); err != nil { + return err + } + if err := s.RawFlush(); err != nil { + return err + } + return nil +} + +// MsgRecv recives some protobuf data and unmarshals it into msg. 
+func (s *Stream) MsgRecv(msg drpc.Message) (err error) { + defer mon.Task()(s.monCtx())(&err) + + data, err := s.RawRecv() + if err != nil { + return err + } + return proto.Unmarshal(data, msg) +} + +// +// terminal messages +// + +// SendError terminates the stream and sends the error to the remote. It is a no-op if +// the stream is already terminated. +func (s *Stream) SendError(serr error) (err error) { + defer mon.Task()(s.monCtx())(&err) + + s.mu.Lock() + if s.sigs.term.IsSet() { + s.mu.Unlock() + return nil + } + + s.writeMu.Lock() + defer s.writeMu.Unlock() + defer s.checkFinished() + + s.sigs.send.Set(io.EOF) // in this state, gRPC returns io.EOF on send. + s.terminate(drpc.Error.New("stream terminated by sending error")) + s.mu.Unlock() + + return s.checkCancelError(s.sendPacket(drpcwire.KindError, drpcwire.MarshalError(serr))) +} + +// Close terminates the stream and sends that the stream has been closed to the remote. +// It is a no-op if the stream is already terminated. +func (s *Stream) Close() (err error) { + defer mon.Task()(s.monCtx())(&err) + + s.mu.Lock() + if s.sigs.term.IsSet() { + s.mu.Unlock() + return nil + } + + s.writeMu.Lock() + defer s.writeMu.Unlock() + defer s.checkFinished() + + s.terminate(drpc.Error.New("stream terminated by sending close")) + s.mu.Unlock() + + return s.checkCancelError(s.sendPacket(drpcwire.KindClose, nil)) +} + +// CloseSend informs the remote that no more messages will be sent. If the remote has +// also already issued a CloseSend, the stream is terminated. It is a no-op if the +// stream already has sent a CloseSend or if it is terminated. 
+func (s *Stream) CloseSend() (err error) { + defer mon.Task()(s.monCtx())(&err) + + s.mu.Lock() + if s.sigs.send.IsSet() || s.sigs.term.IsSet() { + s.mu.Unlock() + return nil + } + + s.writeMu.Lock() + defer s.writeMu.Unlock() + defer s.checkFinished() + + s.sigs.send.Set(drpc.Error.New("send closed")) + s.terminateIfBothClosed() + s.mu.Unlock() + + return s.checkCancelError(s.sendPacket(drpcwire.KindCloseSend, nil)) +} + +// Cancel transitions the stream into a state where all writes to the transport will return +// the provided error, and terminates the stream. It is a no-op if the stream is already +// terminated. +func (s *Stream) Cancel(err error) { + defer mon.Task()(s.monCtx())(nil) + + s.mu.Lock() + defer s.mu.Unlock() + + if s.sigs.term.IsSet() { + return + } + + s.sigs.cancel.Set(err) + s.sigs.send.Set(io.EOF) // in this state, gRPC returns io.EOF on send. + s.terminate(err) +} diff --git a/vendor/storj.io/drpc/drpcwire/README.md b/vendor/storj.io/drpc/drpcwire/README.md new file mode 100644 index 000000000..04f4f41bc --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/README.md @@ -0,0 +1,253 @@ +# package drpcwire + +`import "storj.io/drpc/drpcwire"` + +Package drpcwire provides low level helpers for the drpc wire protocol. + +## Usage + +#### func AppendFrame + +```go +func AppendFrame(buf []byte, fr Frame) []byte +``` +AppendFrame appends a marshaled form of the frame to the provided buffer. + +#### func AppendVarint + +```go +func AppendVarint(buf []byte, x uint64) []byte +``` +AppendVarint appends the varint encoding of x to the buffer and returns it. + +#### func MarshalError + +```go +func MarshalError(err error) []byte +``` +MarshalError returns a byte form of the error with any error code incorporated. + +#### func ReadVarint + +```go +func ReadVarint(buf []byte) (rem []byte, out uint64, ok bool, err error) +``` +ReadVarint reads a varint encoded integer from the front of buf, returning the +remaining bytes, the value, and if there was a success. 
if ok is false, the +returned buffer is the same as the passed in buffer. + +#### func SplitFrame + +```go +func SplitFrame(data []byte, atEOF bool) (int, []byte, error) +``` +SplitFrame is used by bufio.Scanner to split frames out of a stream of bytes. + +#### func SplitN + +```go +func SplitN(pkt Packet, n int, cb func(fr Frame) error) error +``` +SplitN splits the marshaled form of the Packet into a number of frames such that +each frame is at most n bytes. It calls the callback with every such frame. If n +is zero, a default of 1024 is used. + +#### func UnmarshalError + +```go +func UnmarshalError(data []byte) error +``` +UnmarshalError unmarshals the marshaled error to one with a code. + +#### type Frame + +```go +type Frame struct { + // Data is the payload of bytes. + Data []byte + + // ID is used so that the frame can be reconstructed. + ID ID + + // Kind is the kind of the payload. + Kind Kind + + // Done is true if this is the last frame for the ID. + Done bool + + // Control is true if the frame has the control bit set. + Control bool +} +``` + +Frame is a split data frame on the wire. + +#### func ParseFrame + +```go +func ParseFrame(buf []byte) (rem []byte, fr Frame, ok bool, err error) +``` +ParseFrame attempts to parse a frame at the beginning of buf. If successful then +rem contains the unparsed data, fr contains the parsed frame, ok will be true, +and err will be nil. If there is not enough data for a frame, ok will be false +and err will be nil. If the data in the buf is malformed, then an error is +returned. + +#### type ID + +```go +type ID struct { + // Stream is the stream identifier. + Stream uint64 + + // Message is the message identifier. + Message uint64 +} +``` + +ID represents a packet id. + +#### func (ID) Less + +```go +func (i ID) Less(j ID) bool +``` +Less returns true if the id is less than the provided one. An ID is less than +another if the Stream is less, and if the stream is equal, if the Message is +less. 
+ +#### func (ID) String + +```go +func (i ID) String() string +``` +String returns a human readable form of the ID. + +#### type Kind + +```go +type Kind uint8 +``` + +Kind is the enumeration of all the different kinds of messages drpc sends. + +```go +const ( + + // KindInvoke is used to invoke an rpc. The body is the name of the rpc. + KindInvoke Kind = 1 + + // KindMessage is used to send messages. The body is a protobuf. + KindMessage Kind = 2 + + // KindError is used to inform that an error happened. The body is an error + // with a code attached. + KindError Kind = 3 + + // KindClose is used to inform that the rpc is dead. It has no body. + KindClose Kind = 5 + + // KindCloseSend is used to inform that no more messages will be sent. + // It has no body. + KindCloseSend Kind = 6 // body must be empty + + // KindInvokeMetadata includes metadata about the next Invoke packet. + KindInvokeMetadata Kind = 7 +) +``` + +#### func (Kind) String + +```go +func (i Kind) String() string +``` + +#### type Packet + +```go +type Packet struct { + // Data is the payload of the packet. + Data []byte + + // ID is the identifier for the packet. + ID ID + + // Kind is the kind of the packet. + Kind Kind +} +``` + +Packet is a single message sent by drpc. + +#### func (Packet) String + +```go +func (p Packet) String() string +``` +String returns a human readable form of the packet. + +#### type Reader + +```go +type Reader struct { +} +``` + +Reader reconstructs packets from frames read from an io.Reader. + +#### func NewReader + +```go +func NewReader(r io.Reader) *Reader +``` +NewReader constructs a Reader to read Packets from the io.Reader. + +#### func (*Reader) ReadPacket + +```go +func (s *Reader) ReadPacket() (pkt Packet, err error) +``` +ReadPacket reads a packet from the io.Reader. IDs read from frames must be +monotonically increasing. When a new ID is read, the old data is discarded. This +allows for easier asynchronous interrupts. 
If the amount of data in the Packet +becomes too large, an error is returned. + +#### type Writer + +```go +type Writer struct { +} +``` + +Writer is a helper to buffer and write packets and frames to an io.Writer. + +#### func NewWriter + +```go +func NewWriter(w io.Writer, size int) *Writer +``` +NewWriter returns a Writer that will attempt to buffer size data before sending +it to the io.Writer. + +#### func (*Writer) Flush + +```go +func (b *Writer) Flush() (err error) +``` +Flush forces a flush of any buffered data to the io.Writer. It is a no-op if +there is no data in the buffer. + +#### func (*Writer) WriteFrame + +```go +func (b *Writer) WriteFrame(fr Frame) (err error) +``` +WriteFrame appends the frame into the buffer, and if the buffer is larger than +the configured size, flushes it. + +#### func (*Writer) WritePacket + +```go +func (b *Writer) WritePacket(pkt Packet) (err error) +``` +WritePacket writes the packet as a single frame, ignoring any size constraints. diff --git a/vendor/storj.io/drpc/drpcwire/doc.go b/vendor/storj.io/drpc/drpcwire/doc.go new file mode 100644 index 000000000..c41753e19 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/doc.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +// Package drpcwire provides low level helpers for the drpc wire protocol. +package drpcwire + +import "github.com/spacemonkeygo/monkit/v3" + +var mon = monkit.Package() diff --git a/vendor/storj.io/drpc/drpcwire/error.go b/vendor/storj.io/drpc/drpcwire/error.go new file mode 100644 index 000000000..605ef7208 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/error.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcwire + +import ( + "encoding/binary" + + "github.com/zeebo/errs" + + "storj.io/drpc/drpcerr" +) + +// MarshalError returns a byte form of the error with any error code incorporated. 
+func MarshalError(err error) []byte { + var buf [8]byte + binary.BigEndian.PutUint64(buf[:], drpcerr.Code(err)) + return append(buf[:], err.Error()...) +} + +// UnmarshalError unmarshals the marshaled error to one with a code. +func UnmarshalError(data []byte) error { + if len(data) < 8 { + return errs.New("%s (drpcwire note: invalid error data)", data) + } + return drpcerr.WithCode(errs.New("%s", data[8:]), binary.BigEndian.Uint64(data[:8])) +} diff --git a/vendor/storj.io/drpc/drpcwire/packet.go b/vendor/storj.io/drpc/drpcwire/packet.go new file mode 100644 index 000000000..8a4fdc538 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/packet.go @@ -0,0 +1,160 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcwire + +import "fmt" + +//go:generate stringer -type=Kind -trimprefix=Kind_ -output=packet_string.go + +// Kind is the enumeration of all the different kinds of messages drpc sends. +type Kind uint8 + +const ( + // kindReserved is saved for the future in case we need to extend. + kindReserved Kind = 0 + + // kindCancelDeprecated is a reminder that we once used this kind value. + kindCancelDeprecated Kind = 4 + + // KindInvoke is used to invoke an rpc. The body is the name of the rpc. + KindInvoke Kind = 1 + + // KindMessage is used to send messages. The body is a protobuf. + KindMessage Kind = 2 + + // KindError is used to inform that an error happened. The body is an error + // with a code attached. + KindError Kind = 3 + + // KindClose is used to inform that the rpc is dead. It has no body. + KindClose Kind = 5 + + // KindCloseSend is used to inform that no more messages will be sent. + // It has no body. + KindCloseSend Kind = 6 // body must be empty + + // KindInvokeMetadata includes metadata about the next Invoke packet. + KindInvokeMetadata Kind = 7 +) + +// +// packet id +// + +// ID represents a packet id. +type ID struct { + // Stream is the stream identifier. 
+ Stream uint64 + + // Message is the message identifier. + Message uint64 +} + +// Less returns true if the id is less than the provided one. An ID is less than +// another if the Stream is less, and if the stream is equal, if the Message +// is less. +func (i ID) Less(j ID) bool { + return i.Stream < j.Stream || (i.Stream == j.Stream && i.Message < j.Message) +} + +// String returns a human readable form of the ID. +func (i ID) String() string { return fmt.Sprintf("<%d,%d>", i.Stream, i.Message) } + +// +// data frame +// + +// Frame is a split data frame on the wire. +type Frame struct { + // Data is the payload of bytes. + Data []byte + + // ID is used so that the frame can be reconstructed. + ID ID + + // Kind is the kind of the payload. + Kind Kind + + // Done is true if this is the last frame for the ID. + Done bool + + // Control is true if the frame has the control bit set. + Control bool +} + +// ParseFrame attempts to parse a frame at the beginning of buf. If successful +// then rem contains the unparsed data, fr contains the parsed frame, ok will +// be true, and err will be nil. If there is not enough data for a frame, ok +// will be false and err will be nil. If the data in the buf is malformed, then +// an error is returned. 
+func ParseFrame(buf []byte) (rem []byte, fr Frame, ok bool, err error) { + var length uint64 + var control byte + if len(buf) < 4 { + goto bad + } + + rem, control = buf[1:], buf[0] + fr.Done = (control & 0b00000001) > 0 + fr.Control = (control & 0b10000000) > 0 + fr.Kind = Kind((control & 0b01111110) >> 1) + rem, fr.ID.Stream, ok, err = ReadVarint(rem) + if !ok || err != nil { + goto bad + } + rem, fr.ID.Message, ok, err = ReadVarint(rem) + if !ok || err != nil { + goto bad + } + rem, length, ok, err = ReadVarint(rem) + if !ok || err != nil || length > uint64(len(rem)) { + goto bad + } + rem, fr.Data = rem[length:], rem[:length] + + return rem, fr, true, nil +bad: + return buf, fr, false, err +} + +// AppendFrame appends a marshaled form of the frame to the provided buffer. +func AppendFrame(buf []byte, fr Frame) []byte { + control := byte(fr.Kind << 1) + if fr.Done { + control |= 0b00000001 + } + if fr.Control { + control |= 0b10000000 + } + + out := buf + out = append(out, control) + out = AppendVarint(out, fr.ID.Stream) + out = AppendVarint(out, fr.ID.Message) + out = AppendVarint(out, uint64(len(fr.Data))) + out = append(out, fr.Data...) + return out +} + +// +// packet +// + +// Packet is a single message sent by drpc. +type Packet struct { + // Data is the payload of the packet. + Data []byte + + // ID is the identifier for the packet. + ID ID + + // Kind is the kind of the packet. + Kind Kind +} + +// String returns a human readable form of the packet. +func (p Packet) String() string { + return fmt.Sprintf("", + p.ID.Stream, p.ID.Message, p.Kind, len(p.Data)) +} diff --git a/vendor/storj.io/drpc/drpcwire/packet_string.go b/vendor/storj.io/drpc/drpcwire/packet_string.go new file mode 100644 index 000000000..5b7e452f5 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/packet_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type=Kind -trimprefix=Kind_ -output=packet_string.go"; DO NOT EDIT. 
+ +package drpcwire + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[kindReserved-0] + _ = x[kindCancelDeprecated-4] + _ = x[KindInvoke-1] + _ = x[KindMessage-2] + _ = x[KindError-3] + _ = x[KindClose-5] + _ = x[KindCloseSend-6] + _ = x[KindInvokeMetadata-7] +} + +const _Kind_name = "kindReservedKindInvokeKindMessageKindErrorkindCancelDeprecatedKindCloseKindCloseSendKindInvokeMetadata" + +var _Kind_index = [...]uint8{0, 12, 22, 33, 42, 62, 71, 84, 102} + +func (i Kind) String() string { + if i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/storj.io/drpc/drpcwire/split.go b/vendor/storj.io/drpc/drpcwire/split.go new file mode 100644 index 000000000..cfe7662b9 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/split.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcwire + +// SplitN splits the marshaled form of the Packet into a number of +// frames such that each frame is at most n bytes. It calls +// the callback with every such frame. If n is zero, a default of +// 1024 is used. 
+func SplitN(pkt Packet, n int, cb func(fr Frame) error) error { + switch { + case n == 0: + n = 1024 + case n < 0: + n = 0 + } + + for { + fr := Frame{ + Data: pkt.Data, + ID: pkt.ID, + Kind: pkt.Kind, + Done: true, + } + if len(pkt.Data) > n && n > 0 { + fr.Data, pkt.Data = pkt.Data[:n], pkt.Data[n:] + fr.Done = false + } + if err := cb(fr); err != nil { + return err + } + if fr.Done { + return nil + } + } +} diff --git a/vendor/storj.io/drpc/drpcwire/transport.go b/vendor/storj.io/drpc/drpcwire/transport.go new file mode 100644 index 000000000..3eacdb8c3 --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/transport.go @@ -0,0 +1,158 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcwire + +import ( + "bufio" + "io" + "sync" + + "storj.io/drpc" +) + +// +// Writer +// + +// Writer is a helper to buffer and write packets and frames to an io.Writer. +type Writer struct { + w io.Writer + size int + mu sync.Mutex + buf []byte +} + +// NewWriter returns a Writer that will attempt to buffer size data before +// sending it to the io.Writer. +func NewWriter(w io.Writer, size int) *Writer { + if size == 0 { + size = 1024 + } + + return &Writer{ + w: w, + size: size, + buf: make([]byte, 0, size), + } +} + +// WritePacket writes the packet as a single frame, ignoring any size +// constraints. +func (b *Writer) WritePacket(pkt Packet) (err error) { + return b.WriteFrame(Frame{ + Data: pkt.Data, + ID: pkt.ID, + Kind: pkt.Kind, + Done: true, + }) +} + +// WriteFrame appends the frame into the buffer, and if the buffer is larger +// than the configured size, flushes it. +func (b *Writer) WriteFrame(fr Frame) (err error) { + b.mu.Lock() + b.buf = AppendFrame(b.buf, fr) + if len(b.buf) >= b.size { + _, err = b.w.Write(b.buf) + b.buf = b.buf[:0] + } + b.mu.Unlock() + return err +} + +// Flush forces a flush of any buffered data to the io.Writer. It is a no-op if +// there is no data in the buffer. 
+func (b *Writer) Flush() (err error) { + defer mon.Task()(nil)(&err) + + b.mu.Lock() + if len(b.buf) > 0 { + _, err = b.w.Write(b.buf) + b.buf = b.buf[:0] + } + b.mu.Unlock() + return err +} + +// +// Reader +// + +// SplitFrame is used by bufio.Scanner to split frames out of a stream of bytes. +func SplitFrame(data []byte, atEOF bool) (int, []byte, error) { + rem, _, ok, err := ParseFrame(data) + switch advance := len(data) - len(rem); { + case err != nil: + return 0, nil, err + case len(data) > 0 && !ok && atEOF: + return 0, nil, drpc.ProtocolError.New("truncated frame") + case !ok: + return 0, nil, nil + case advance < 0, len(data) < advance: + return 0, nil, drpc.InternalError.New("scanner issue with advance value") + default: + return advance, data[:advance], nil + } +} + +// Reader reconstructs packets from frames read from an io.Reader. +type Reader struct { + buf *bufio.Scanner + id ID +} + +// NewReader constructs a Reader to read Packets from the io.Reader. +func NewReader(r io.Reader) *Reader { + buf := bufio.NewScanner(r) + buf.Buffer(make([]byte, 4<<10), 1<<20) + buf.Split(SplitFrame) + return &Reader{buf: buf} +} + +// ReadPacket reads a packet from the io.Reader. IDs read from frames +// must be monotonically increasing. When a new ID is read, the old +// data is discarded. This allows for easier asynchronous interrupts. +// If the amount of data in the Packet becomes too large, an error is +// returned. +func (s *Reader) ReadPacket() (pkt Packet, err error) { + defer mon.Task()(nil)(&err) + + for s.buf.Scan() { + rem, fr, ok, err := ParseFrame(s.buf.Bytes()) + switch { + case err != nil: + return Packet{}, drpc.ProtocolError.Wrap(err) + case !ok, len(rem) > 0: + return Packet{}, drpc.InternalError.New("problem with scanner") + case fr.Control: + // Ignore any frames with the control bit set so that we can + // use it in the future to mean things to people who understand + // it. 
+ continue + case fr.ID.Less(s.id): + return Packet{}, drpc.ProtocolError.New("id monotonicity violation") + case s.id.Less(fr.ID): + s.id = fr.ID + pkt = Packet{ + Data: pkt.Data[:0], + ID: fr.ID, + Kind: fr.Kind, + } + case fr.Kind != pkt.Kind: + return Packet{}, drpc.ProtocolError.New("packet kind change") + } + + pkt.Data = append(pkt.Data, fr.Data...) + switch { + case len(pkt.Data) > 4<<20: + return Packet{}, drpc.ProtocolError.New("data overflow") + case fr.Done: + return pkt, nil + } + } + if err := s.buf.Err(); err != nil { + return Packet{}, err + } + return Packet{}, io.EOF +} diff --git a/vendor/storj.io/drpc/drpcwire/varint.go b/vendor/storj.io/drpc/drpcwire/varint.go new file mode 100644 index 000000000..b76d15aea --- /dev/null +++ b/vendor/storj.io/drpc/drpcwire/varint.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package drpcwire + +import "storj.io/drpc" + +// ReadVarint reads a varint encoded integer from the front of buf, returning the +// remaining bytes, the value, and if there was a success. if ok is false, the +// returned buffer is the same as the passed in buffer. 
+func ReadVarint(buf []byte) (rem []byte, out uint64, ok bool, err error) { + rem = buf + for shift := uint(0); shift < 64; shift += 7 { + if len(rem) == 0 { + return buf, 0, false, nil + } + val := uint64(rem[0]) + out, rem = out|((val&127)<= 128 { + buf = append(buf, byte(x&127|128)) + x >>= 7 + } + return append(buf, byte(x)) +} diff --git a/vendor/storj.io/drpc/go.mod b/vendor/storj.io/drpc/go.mod new file mode 100644 index 000000000..4c560eb0f --- /dev/null +++ b/vendor/storj.io/drpc/go.mod @@ -0,0 +1,10 @@ +module storj.io/drpc + +go 1.14 + +require ( + github.com/gogo/protobuf v1.2.1 + github.com/spacemonkeygo/monkit/v3 v3.0.4 + github.com/zeebo/assert v1.1.0 + github.com/zeebo/errs v1.2.2 +) diff --git a/vendor/storj.io/drpc/go.sum b/vendor/storj.io/drpc/go.sum new file mode 100644 index 000000000..ac65c599f --- /dev/null +++ b/vendor/storj.io/drpc/go.sum @@ -0,0 +1,20 @@ +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/spacemonkeygo/monkit/v3 v3.0.4 h1:Ay+PZirv+qfd4sqcT+X/U3BnC7AcIaqp/IXh0oV36k8= +github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA= +github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= +github.com/zeebo/errs v1.2.2/go.mod 
h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/storj.io/drpc/staticcheck.conf b/vendor/storj.io/drpc/staticcheck.conf new file mode 100644 index 000000000..62ca9d4a4 --- /dev/null +++ b/vendor/storj.io/drpc/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1008"] diff --git a/vendor/storj.io/uplink/.clabot b/vendor/storj.io/uplink/.clabot new file mode 100644 index 000000000..d8393a410 --- /dev/null +++ b/vendor/storj.io/uplink/.clabot @@ -0,0 +1,63 @@ +{ + "contributors": [ + "aleitner", + "aligeti", + "Barterio", + "brimstone", + "bryanchriswhite", + "cam-a", + "coyle", + "crawter", + "dylanlott", + "egonelbre", + "fadila82", + "iglesiasbrandon", + "jenlij", + "jhagans3", + "jtolds", + "kaloyan-raev", + "littleskunk", + "mniewrzal", + "mobyvb", + "navillasa", + "nfarah86", + "NikolaiYurchenko", + "phutchins", + "rikysya", + "stefanbenten", + "thepaul", + "wthorp", + "RichardLitt", + "fuskovic", + "keleffew", + "oCroso", + "pgerbes1", + "JessicaGreben", + "benjaminsirb", + "simongui", + "ifraixedes", + "VinozzZ", + "zeebo", + "barlock", + "sndrr", + "BlackDuck888", + "3bl3gamer", + "ethanadams", + "ReneSmeekes", + "VitaliiShpital", + "isaachess", + "azdagron", + 
"phthano", + "nerdatwork", + "kmozurkewich", + "TopperDEL", + "kristaxox", + "calebcase", + "mbouzi", + "AlexeyALeonov", + "Qweder93", + "cpustejovsky", + "grafael", + "ihaid" + ] +} diff --git a/vendor/storj.io/uplink/.gitignore b/vendor/storj.io/uplink/.gitignore new file mode 100644 index 000000000..20d5cf778 --- /dev/null +++ b/vendor/storj.io/uplink/.gitignore @@ -0,0 +1,37 @@ +# Mac OS X files +.DS_Store + +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.db + +# Test binary, build with `go test -c` +*.test +*.prof + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# VSCode +/.vscode +debug + +# Visual Studio +/.vs + +# Jetbrains +.idea/ + +*.coverprofile +*.log + +/release/ +*.swp +/bin +resource.syso + +*.resource.go +/.build/ \ No newline at end of file diff --git a/vendor/storj.io/uplink/CODE_OF_CONDUCT.md b/vendor/storj.io/uplink/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..ef2f1b5b8 --- /dev/null +++ b/vendor/storj.io/uplink/CODE_OF_CONDUCT.md @@ -0,0 +1,53 @@ +# Code of Conduct +Storj strives to create a welcoming and thriving environment. In order to ensure that every community member is respected and supported, we’ve adopted some general community guidelines. + +## Our Pledge + +We value diversity and inclusiveness in our community and encourage participation by fostering an open and welcoming environment. We as contributors and maintainers pledge to keep our community respectful and inclusive. By participating in our project, you pledge to keep our community a harassment-free experience for everyone, regardless of race, color, religion, gender, sexual orientation, national origin, age, disability, pregnancy, marital status, body size, appearance, education, level of experience, education, or socioeconomic status. 
+ +## Our Standards + +### Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +### Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and agree to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for any behavior that they deem inappropriate, threatening, offensive, or harmful. + +## Scope +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community.Examples of representing a project or community include using an official project email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported anonymously or by contacting the project team at conduct@storj.io. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership or maintainers. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/vendor/storj.io/uplink/Jenkinsfile b/vendor/storj.io/uplink/Jenkinsfile new file mode 100644 index 000000000..b7a1c1246 --- /dev/null +++ b/vendor/storj.io/uplink/Jenkinsfile @@ -0,0 +1,133 @@ +pipeline { + agent { + docker { + label 'main' + image docker.build("storj-ci", "--pull https://github.com/storj/ci.git").id + args '-u root:root --cap-add SYS_PTRACE -v "/tmp/gomod":/go/pkg/mod' + } + } + options { + timeout(time: 26, unit: 'MINUTES') + } + environment { + NPM_CONFIG_CACHE = '/tmp/npm/cache' + } + stages { + stage('Build') { + steps { + checkout scm + + sh 'mkdir -p .build' + + sh 'service postgresql start' + + sh 'cockroach start-single-node --insecure --store=\'/tmp/crdb\' --listen-addr=localhost:26257 --http-addr=localhost:8080 --cache 512MiB --max-sql-memory 512MiB --background' + } + } + + stage('Verification') { + parallel { + stage('Lint') { + steps { + sh 'check-copyright' + sh 'check-large-files' + sh 
'check-imports ./...' + sh 'check-peer-constraints' + sh 'storj-protobuf --protoc=$HOME/protoc/bin/protoc lint' + sh 'storj-protobuf --protoc=$HOME/protoc/bin/protoc check-lock' + sh 'check-atomic-align ./...' + sh 'check-errs ./...' + sh './scripts/check-dependencies.sh' + sh 'staticcheck ./...' + sh 'golangci-lint --config /go/ci/.golangci.yml -j=2 run' + sh 'go-licenses check ./...' + sh './scripts/check-libuplink-size.sh' + } + } + + stage('Tests') { + environment { + COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=./...'}" + } + steps { + sh 'go vet ./...' + sh 'go test -parallel 4 -p 6 -vet=off $COVERFLAGS -timeout 20m -json -race ./... 2>&1 | tee .build/tests.json | xunit -out .build/tests.xml' + // TODO enable this later + // sh 'check-clean-directory' + } + + post { + always { + sh script: 'cat .build/tests.json | tparse -all -top -slow 100', returnStatus: true + archiveArtifacts artifacts: '.build/tests.json' + junit '.build/tests.xml' + + script { + if(fileExists(".build/coverprofile")){ + sh script: 'filter-cover-profile < .build/coverprofile > .build/clean.coverprofile', returnStatus: true + sh script: 'gocov convert .build/clean.coverprofile > .build/cover.json', returnStatus: true + sh script: 'gocov-xml < .build/cover.json > .build/cobertura.xml', returnStatus: true + cobertura coberturaReportFile: '.build/cobertura.xml' + } + } + } + } + } + + stage('Testsuite') { + environment { + STORJ_COCKROACH_TEST = 'cockroach://root@localhost:26257/testcockroach?sslmode=disable' + STORJ_POSTGRES_TEST = 'postgres://postgres@localhost/teststorj?sslmode=disable' + COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=./...'}" + } + steps { + sh 'cockroach sql --insecure --host=localhost:26257 -e \'create database testcockroach;\'' + sh 'psql -U postgres -c \'create database teststorj;\'' + dir('testsuite'){ + sh 'go vet ./...' 
+ sh 'go test -parallel 4 -p 6 -vet=off $COVERFLAGS -timeout 20m -json -race ./... 2>&1 | tee ../.build/testsuite.json | xunit -out ../.build/testsuite.xml' + } + // TODO enable this later + // sh 'check-clean-directory' + } + + post { + always { + sh script: 'cat .build/testsuite.json | tparse -all -top -slow 100', returnStatus: true + archiveArtifacts artifacts: '.build/testsuite.json' + junit '.build/testsuite.xml' + } + } + } + + stage('Integration [storj/storj]') { + environment { + STORJ_POSTGRES_TEST = 'postgres://postgres@localhost/teststorj2?sslmode=disable' + } + steps { + sh 'psql -U postgres -c \'create database teststorj2;\'' + dir('testsuite'){ + sh 'go vet storj.io/storj/...' + sh 'go test -parallel 4 -p 6 -vet=off -timeout 20m -json storj.io/storj/... 2>&1 | tee ../.build/testsuite-storj.json | xunit -out ../.build/testsuite-storj.xml' + } + } + + post { + always { + sh script: 'cat .build/testsuite-storj.json | tparse -all -top -slow 100', returnStatus: true + archiveArtifacts artifacts: '.build/testsuite-storj.json' + junit '.build/testsuite-storj.xml' + } + } + } + } + } + } + + post { + always { + sh "chmod -R 777 ." // ensure Jenkins agent can delete the working directory + deleteDir() + } + } +} diff --git a/vendor/storj.io/uplink/LICENSE b/vendor/storj.io/uplink/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/storj.io/uplink/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/storj.io/uplink/MAINTAINERS.md b/vendor/storj.io/uplink/MAINTAINERS.md new file mode 100644 index 000000000..b73b7c764 --- /dev/null +++ b/vendor/storj.io/uplink/MAINTAINERS.md @@ -0,0 +1,50 @@ +# How to release a new version + +## When to release a new version + +New version should be released when we are ready to make changes generally available. + +New version should not be released if we want to test our latest changes or to make them available to a limited number of users. This can be achieved without releasing a new version. Go modules allow to point to latest master or specific Git commit. However, commits from master are not suitable for use in production unless they are a release tag. Do not use non-release commits in downstream projects in production. 
+ +Consider releasing a new Release Candidate version to make changes available to a larger group of users, if we are not ready to make them available to everyone yet. + +Under no circumstances may releases be done during the weekend or the wee hours of the night. + +## Version numbers + +We follow the rules for semantic versioning, but prefixed with the letter `v`. + +Examples of official releases: +- `v1.0.0` +- `v1.0.3` +- `v1.2.3` +- `v2.1.7` + +Examples of Release Candidates: +- `v1.0.0-rc.4` +- `v2.1.0-rc.1` + +## Step-by-step release process + +1. Clone or fetch latest master of these Git repositories: + - https://github.com/storj/storj + - https://github.com/storj/gateway + - https://github.com/storj/uplink-c +2. For each of them, update `go.mod` and `testsuite/go.mod` to latest master (or the specific Git commit that will be tagged as a new version) of `storj.io/uplink`. +3. Use `go mod tidy` to update the respective `go.sum` files. +4. Push a change to Gerrit with the updated `go.mod` and `go.sum` files for each of the Git repositories. +5. Wait for the build to finish. If the build fails for any of the Git repositories, abort the release process. Investigate the issue, fix it, and start over the release process. +6. If all builds are successful, do not merge the changes yet. +7. If you haven't done this yet, announce your intention to make a new release to the #libuplink Slack channel. +8. Wait for a confirmation by at least one maintainer of this project (storj/uplink) before proceeding with the next step. +9. Create a new release from the Github web interface: + - Go to https://github.com/storj/uplink/releases. + - Click the `Draft a new release` button. + - Enter `Tag version` following the rules for the version number, e.g. `v1.2.3`. + - Enter the same value as `Release title`, e.g. `v1.2.3`. + - If there are new commits in master since you executed step 1, do not include them in the release.
Change the `Target` from `master` to the specific Git commit used in step 1. + - Describe the changes since the previous release in a human-readable way. Only those changes that affect users. No need to describe refactorings, etc. + - If you are releasing a new Release Candidate, select the `This is a pre-release` checkbox. + - Click the `Publish release` button. +10. Update the Gerrit changes from step 1 with the new official version number. +11. Wait for the build to finish again and merge them. diff --git a/vendor/storj.io/uplink/Makefile b/vendor/storj.io/uplink/Makefile new file mode 100644 index 000000000..ea0968929 --- /dev/null +++ b/vendor/storj.io/uplink/Makefile @@ -0,0 +1,7 @@ +.PHONY: bump-dependencies +bump-dependencies: + go get storj.io/common@master + go mod tidy + cd testsuite;\ + go get storj.io/common@master storj.io/storj@master storj.io/uplink@master;\ + go mod tidy \ No newline at end of file diff --git a/vendor/storj.io/uplink/README.md b/vendor/storj.io/uplink/README.md new file mode 100644 index 000000000..7a632ca6d --- /dev/null +++ b/vendor/storj.io/uplink/README.md @@ -0,0 +1,48 @@ +# Libuplink + +Go library for Storj V3 Network. + +[![Go Report Card](https://goreportcard.com/badge/storj.io/uplink)](https://goreportcard.com/report/storj.io/uplink) +[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://pkg.go.dev/storj.io/uplink) +![Beta](https://img.shields.io/badge/version-beta-green.svg) + + + +Storj is building a decentralized cloud storage network. +[Check out our white paper for more info!](https://storj.io/white-paper) + +---- + +Storj is an S3-compatible platform and suite of decentralized applications that +allows you to store data in a secure and decentralized manner. Your files are +encrypted, broken into little pieces and stored in a global decentralized +network of computers. Luckily, we also support allowing you (and only you) to +retrieve those files! 
+ +### A Note about Versioning + +Our versioning in this repo is intended to primarily support the expectations of the +[Go modules](https://blog.golang.org/using-go-modules) system, so you can expect that +within a major version release, backwards-incompatible changes will be avoided at high +cost. This is not the case with our downstream repo, +[storj/storj](https://github.com/storj/storj/). + +# Documentation + +- [Go Doc](https://pkg.go.dev/storj.io/uplink) +- [Libuplink Walkthrough](https://github.com/storj/storj/wiki/Libuplink-Walkthrough) + +# Language bindings + +- [Uplink-C](https://github.com/storj/uplink-c) + +# License + +This library is distributed under the +[Apache v2](https://www.apache.org/licenses/LICENSE-2.0) license. + +# Support + +If you have any questions or suggestions please reach out to us on +[our community forum](https://forum.storj.io/) or +email us at support@tardigrade.io. diff --git a/vendor/storj.io/uplink/access.go b/vendor/storj.io/uplink/access.go new file mode 100644 index 000000000..a238d3f92 --- /dev/null +++ b/vendor/storj.io/uplink/access.go @@ -0,0 +1,314 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + "strings" + "time" + + "github.com/btcsuite/btcutil/base58" + "github.com/zeebo/errs" + + "storj.io/common/encryption" + "storj.io/common/macaroon" + "storj.io/common/paths" + "storj.io/common/pb" + "storj.io/common/storj" + "storj.io/uplink/internal/expose" +) + +// An Access Grant contains everything to access a project and specific buckets. +// It includes a potentially-restricted API Key, a potentially-restricted set +// of encryption information, and information about the Satellite responsible +// for the project's metadata. +type Access struct { + satelliteAddress string + apiKey *macaroon.APIKey + encAccess *encryptionAccess +} + +// SharePrefix defines a prefix that will be shared. 
+type SharePrefix struct { + Bucket string + // Prefix is the prefix of the shared object keys. + // + // Note: that within a bucket, the hierarchical key derivation scheme is + // delineated by forward slashes (/), so encryption information will be + // included in the resulting access grant to decrypt any key that shares + // the same prefix up until the last slash. + Prefix string +} + +// Permission defines what actions can be used to share. +type Permission struct { + // AllowDownload gives permission to download the object's content. It + // allows getting object metadata, but it does not allow listing buckets. + AllowDownload bool + // AllowUpload gives permission to create buckets and upload new objects. + // It does not allow overwriting existing objects unless AllowDelete is + // granted too. + AllowUpload bool + // AllowList gives permission to list buckets. It allows getting object + // metadata, but it does not allow downloading the object's content. + AllowList bool + // AllowDelete gives permission to delete buckets and objects. Unless + // either AllowDownload or AllowList is granted too, no object metadata and + // no error info will be returned for deleted objects. + AllowDelete bool + // NotBefore restricts when the resulting access grant is valid for. + // If set, the resulting access grant will not work if the Satellite + // believes the time is before NotBefore. + // If set, this value should always be before NotAfter. + NotBefore time.Time + // NotAfter restricts when the resulting access grant is valid for. + // If set, the resulting access grant will not work if the Satellite + // believes the time is after NotAfter. + // If set, this value should always be after NotBefore. + NotAfter time.Time +} + +// ParseAccess parses a serialized access grant string. +// +// This should be the main way to instantiate an access grant for opening a project. +// See the note on RequestAccessWithPassphrase. 
+func ParseAccess(access string) (*Access, error) { + data, version, err := base58.CheckDecode(access) + if err != nil || version != 0 { + return nil, packageError.New("invalid access grant format") + } + + p := new(pb.Scope) + if err := pb.Unmarshal(data, p); err != nil { + return nil, packageError.New("unable to unmarshal access grant: %v", err) + } + + if len(p.SatelliteAddr) == 0 { + return nil, packageError.New("access grant is missing satellite address") + } + + apiKey, err := macaroon.ParseRawAPIKey(p.ApiKey) + if err != nil { + return nil, packageError.New("access grant has malformed api key: %v", err) + } + + encAccess, err := parseEncryptionAccessFromProto(p.EncryptionAccess) + if err != nil { + return nil, packageError.New("access grant has malformed encryption access: %v", err) + } + + return &Access{ + satelliteAddress: p.SatelliteAddr, + apiKey: apiKey, + encAccess: encAccess, + }, nil +} + +// Serialize serializes an access grant such that it can be used later with +// ParseAccess or other tools. +func (access *Access) Serialize() (string, error) { + switch { + case len(access.satelliteAddress) == 0: + return "", packageError.New("access grant is missing satellite address") + case access.apiKey == nil: + return "", packageError.New("access grant is missing api key") + case access.encAccess == nil: + return "", packageError.New("access grant is missing encryption access") + } + + enc, err := access.encAccess.toProto() + if err != nil { + return "", packageError.Wrap(err) + } + + data, err := pb.Marshal(&pb.Scope{ + SatelliteAddr: access.satelliteAddress, + ApiKey: access.apiKey.SerializeRaw(), + EncryptionAccess: enc, + }) + if err != nil { + return "", packageError.New("unable to marshal access grant: %v", err) + } + + return base58.CheckEncode(data, 0), nil +} + +// RequestAccessWithPassphrase generates a new access grant using a passhprase. +// It must talk to the Satellite provided to get a project-based salt for +// deterministic key derivation. 
+// +// Note: this is a CPU-heavy function that uses a password-based key derivation function +// (Argon2). This should be a setup-only step. Most common interactions with the library +// should be using a serialized access grant through ParseAccess directly. +func RequestAccessWithPassphrase(ctx context.Context, satelliteAddress, apiKey, passphrase string) (*Access, error) { + return (Config{}).RequestAccessWithPassphrase(ctx, satelliteAddress, apiKey, passphrase) +} + +// RequestAccessWithPassphrase generates a new access grant using a passphrase. +// It must talk to the Satellite provided to get a project-based salt for +// deterministic key derivation. +// +// Note: this is a CPU-heavy function that uses a password-based key derivation function +// (Argon2). This should be a setup-only step. Most common interactions with the library +// should be using a serialized access grant through ParseAccess directly. +func (config Config) RequestAccessWithPassphrase(ctx context.Context, satelliteAddress, apiKey, passphrase string) (*Access, error) { + return requestAccessWithPassphraseAndConcurrency(ctx, config, satelliteAddress, apiKey, passphrase, 8) +} + +func init() { + // expose this method for backcomp package. + expose.RequestAccessWithPassphraseAndConcurrency = requestAccessWithPassphraseAndConcurrency + + // expose this method for private/access package. + expose.EnablePathEncryptionBypass = enablePathEncryptionBypass +} + +// requestAccessWithPassphraseAndConcurrency requests satellite for a new access grant using a passphrase and specific concurrency for the Argon2 key derivation. +// +// NB: when modifying the signature of this func, also update backcomp and internal/expose packages.
+func requestAccessWithPassphraseAndConcurrency(ctx context.Context, config Config, satelliteAddress, apiKey, passphrase string, concurrency uint8) (_ *Access, err error) { + parsedAPIKey, err := macaroon.ParseAPIKey(apiKey) + if err != nil { + return nil, packageError.Wrap(err) + } + + metainfo, _, fullNodeURL, err := config.dial(ctx, satelliteAddress, parsedAPIKey) + if err != nil { + return nil, packageError.Wrap(err) + } + defer func() { err = errs.Combine(err, metainfo.Close()) }() + + info, err := metainfo.GetProjectInfo(ctx) + if err != nil { + return nil, convertKnownErrors(err, "") + } + + key, err := encryption.DeriveRootKey([]byte(passphrase), info.ProjectSalt, "", concurrency) + if err != nil { + return nil, packageError.Wrap(err) + } + + encAccess := newEncryptionAccessWithDefaultKey(key) + encAccess.setDefaultPathCipher(storj.EncAESGCM) + return &Access{ + satelliteAddress: fullNodeURL, + apiKey: parsedAPIKey, + encAccess: encAccess, + }, nil +} + +// enablePathEncryptionBypass enables path encryption bypass for embedded encryption access. +// +// NB: when modifying the signature of this func, also update private/access and internal/expose packages. +func enablePathEncryptionBypass(access *Access) error { + access.encAccess.Store().EncryptionBypass = true + return nil +} + +// Share creates a new access grant with specific permissions. +// +// Access grants can only have their existing permissions restricted, +// and the resulting access grant will only allow for the intersection of all previous +// Share calls in the access grant construction chain. +// +// Prefixes, if provided, restrict the access grant (and internal encryption information) +// to only contain enough information to allow access to just those prefixes. 
+func (access *Access) Share(permission Permission, prefixes ...SharePrefix) (*Access, error) { + if permission == (Permission{}) { + return nil, packageError.New("permission is empty") + } + + var notBefore, notAfter *time.Time + if !permission.NotBefore.IsZero() { + notBefore = &permission.NotBefore + } + if !permission.NotAfter.IsZero() { + notAfter = &permission.NotAfter + } + + if notBefore != nil && notAfter != nil && notAfter.Before(*notBefore) { + return nil, packageError.New("invalid time range") + } + + caveat := macaroon.Caveat{ + DisallowReads: !permission.AllowDownload, + DisallowWrites: !permission.AllowUpload, + DisallowLists: !permission.AllowList, + DisallowDeletes: !permission.AllowDelete, + NotBefore: notBefore, + NotAfter: notAfter, + } + + sharedAccess := newEncryptionAccess() + sharedAccess.setDefaultPathCipher(access.encAccess.Store().GetDefaultPathCipher()) + if len(prefixes) == 0 { + sharedAccess.setDefaultKey(access.encAccess.Store().GetDefaultKey()) + } + + for _, prefix := range prefixes { + // If the share prefix ends in a `/` we need to remove this final slash. + // Otherwise, if the shared prefix is `/bob/`, the encrypted shared + // prefix results in `enc("")/enc("bob")/enc("")`. This is an incorrect + // encrypted prefix; what we really want is `enc("")/enc("bob")`.
+ unencPath := paths.NewUnencrypted(strings.TrimSuffix(prefix.Prefix, "/")) + + encPath, err := encryption.EncryptPathWithStoreCipher(prefix.Bucket, unencPath, access.encAccess.store) + if err != nil { + return nil, err + } + derivedKey, err := encryption.DerivePathKey(prefix.Bucket, unencPath, access.encAccess.store) + if err != nil { + return nil, err + } + + if err := sharedAccess.store.Add(prefix.Bucket, unencPath, encPath, *derivedKey); err != nil { + return nil, err + } + caveat.AllowedPaths = append(caveat.AllowedPaths, &macaroon.Caveat_Path{ + Bucket: []byte(prefix.Bucket), + EncryptedPathPrefix: []byte(encPath.Raw()), + }) + } + + restrictedAPIKey, err := access.apiKey.Restrict(caveat) + if err != nil { + return nil, err + } + + restrictedAccess := &Access{ + satelliteAddress: access.satelliteAddress, + apiKey: restrictedAPIKey, + encAccess: sharedAccess, + } + return restrictedAccess, nil +} + +// ReadOnlyPermission returns a Permission that allows reading and listing +// (if the parent access grant already allows those things). +func ReadOnlyPermission() Permission { + return Permission{ + AllowDownload: true, + AllowList: true, + } +} + +// WriteOnlyPermission returns a Permission that allows writing and deleting +// (if the parent access grant already allows those things). +func WriteOnlyPermission() Permission { + return Permission{ + AllowUpload: true, + AllowDelete: true, + } +} + +// FullPermission returns a Permission that allows all actions that the +// parent access grant already allows. +func FullPermission() Permission { + return Permission{ + AllowDownload: true, + AllowUpload: true, + AllowList: true, + AllowDelete: true, + } +} diff --git a/vendor/storj.io/uplink/bucket.go b/vendor/storj.io/uplink/bucket.go new file mode 100644 index 000000000..00789ca78 --- /dev/null +++ b/vendor/storj.io/uplink/bucket.go @@ -0,0 +1,137 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package uplink + +import ( + "context" + "errors" + "time" + + "github.com/zeebo/errs" + + "storj.io/common/errs2" + "storj.io/common/memory" + "storj.io/common/rpc/rpcstatus" + "storj.io/common/storj" +) + +// ErrBucketNameInvalid is returned when the bucket name is invalid. +var ErrBucketNameInvalid = errors.New("bucket name invalid") + +// ErrBucketAlreadyExists is returned when the bucket already exists during creation. +var ErrBucketAlreadyExists = errors.New("bucket already exists") + +// ErrBucketNotEmpty is returned when the bucket is not empty during deletion. +var ErrBucketNotEmpty = errors.New("bucket not empty") + +// ErrBucketNotFound is returned when the bucket is not found. +var ErrBucketNotFound = errors.New("bucket not found") + +// Bucket contains information about the bucket. +type Bucket struct { + Name string + Created time.Time +} + +// StatBucket returns information about a bucket. +func (project *Project) StatBucket(ctx context.Context, bucket string) (info *Bucket, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + b, err := project.project.GetBucket(ctx, bucket) + if err != nil { + if storj.ErrNoBucket.Has(err) { + return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + } else if storj.ErrBucketNotFound.Has(err) { + return nil, errwrapf("%w (%q)", ErrBucketNotFound, bucket) + } + return nil, convertKnownErrors(err, bucket) + } + + return &Bucket{ + Name: b.Name, + Created: b.Created, + }, nil +} + +// CreateBucket creates a new bucket. +// +// When bucket already exists it returns a valid Bucket and ErrBucketAlreadyExists.
+func (project *Project) CreateBucket(ctx context.Context, bucket string) (created *Bucket, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + // TODO remove bucket configuration when proper fix will be deployed on satellite + b, err := project.project.CreateBucket(ctx, bucket, &storj.Bucket{ + PathCipher: storj.EncAESGCM, + DefaultRedundancyScheme: storj.RedundancyScheme{ + Algorithm: storj.ReedSolomon, + ShareSize: 256 * memory.B.Int32(), + RequiredShares: 29, + RepairShares: 35, + OptimalShares: 80, + TotalShares: 110, + }, + }) + + if err != nil { + if storj.ErrNoBucket.Has(err) { + return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + } + if errs2.IsRPC(err, rpcstatus.AlreadyExists) { + // TODO: Ideally, the satellite should return the existing bucket when this error occurs. + existing, err := project.StatBucket(ctx, bucket) + if err != nil { + return existing, errs.Combine(errwrapf("%w (%q)", ErrBucketAlreadyExists, bucket), convertKnownErrors(err, bucket)) + } + return existing, errwrapf("%w (%q)", ErrBucketAlreadyExists, bucket) + } + return nil, convertKnownErrors(err, bucket) + } + + return &Bucket{ + Name: b.Name, + Created: b.Created, + }, nil +} + +// EnsureBucket ensures that a bucket exists or creates a new one. +// +// When bucket already exists it returns a valid Bucket and no error. +func (project *Project) EnsureBucket(ctx context.Context, bucket string) (ensured *Bucket, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + ensured, err = project.CreateBucket(ctx, bucket) + if err != nil && !errors.Is(err, ErrBucketAlreadyExists) { + return nil, convertKnownErrors(err, bucket) + } + + return ensured, nil +} + +// DeleteBucket deletes a bucket. +// +// When bucket is not empty it returns ErrBucketNotEmpty. 
+func (project *Project) DeleteBucket(ctx context.Context, bucket string) (deleted *Bucket, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + existing, err := project.project.DeleteBucket(ctx, bucket) + if err != nil { + if errs2.IsRPC(err, rpcstatus.FailedPrecondition) { + return nil, errwrapf("%w (%q)", ErrBucketNotEmpty, bucket) + } else if storj.ErrBucketNotFound.Has(err) { + return nil, errwrapf("%w (%q)", ErrBucketNotFound, bucket) + } else if storj.ErrNoBucket.Has(err) { + return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + } + return nil, convertKnownErrors(err, bucket) + } + + if existing == (storj.Bucket{}) { + return nil, nil + } + + return &Bucket{ + Name: existing.Name, + Created: existing.Created, + }, nil +} diff --git a/vendor/storj.io/uplink/buckets.go b/vendor/storj.io/uplink/buckets.go new file mode 100644 index 000000000..181ee2b08 --- /dev/null +++ b/vendor/storj.io/uplink/buckets.go @@ -0,0 +1,128 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + + "storj.io/common/storj" +) + +// ListBucketsOptions defines bucket listing options. +type ListBucketsOptions struct { + // Cursor sets the starting position of the iterator. The first item listed will be the one after the cursor. + Cursor string +} + +// ListBuckets returns an iterator over the buckets. +func (project *Project) ListBuckets(ctx context.Context, options *ListBucketsOptions) *BucketIterator { + defer mon.Func().ResetTrace(&ctx)(nil) + + opts := storj.BucketListOptions{ + Direction: storj.After, + } + + if options != nil { + opts.Cursor = options.Cursor + } + + buckets := BucketIterator{ + ctx: ctx, + project: project, + options: opts, + } + + return &buckets +} + +// BucketIterator is an iterator over a collection of buckets. 
+type BucketIterator struct { + ctx context.Context + project *Project + options storj.BucketListOptions + list *storj.BucketList + position int + completed bool + err error +} + +// Next prepares next Bucket for reading. +// It returns false if the end of the iteration is reached and there are no more buckets, or if there is an error. +func (buckets *BucketIterator) Next() bool { + if buckets.err != nil { + buckets.completed = true + return false + } + + if buckets.list == nil { + more := buckets.loadNext() + buckets.completed = !more + return more + } + + if buckets.position >= len(buckets.list.Items)-1 { + if !buckets.list.More { + buckets.completed = true + return false + } + more := buckets.loadNext() + buckets.completed = !more + return more + } + + buckets.position++ + + return true +} + +func (buckets *BucketIterator) loadNext() bool { + list, err := buckets.project.db.ListBuckets(buckets.ctx, buckets.options) + if err != nil { + buckets.err = err + return false + } + buckets.list = &list + if list.More { + buckets.options = buckets.options.NextPage(list) + } + buckets.position = 0 + return len(list.Items) > 0 +} + +// Err returns error, if one happened during iteration. +func (buckets *BucketIterator) Err() error { + return packageError.Wrap(buckets.err) +} + +// Item returns the current bucket in the iterator. 
+func (buckets *BucketIterator) Item() *Bucket {
+	item := buckets.item()
+	if item == nil {
+		return nil
+	}
+	return &Bucket{
+		Name:    item.Name,
+		Created: item.Created,
+	}
+}
+
+func (buckets *BucketIterator) item() *storj.Bucket {
+	if buckets.completed {
+		return nil
+	}
+
+	if buckets.err != nil {
+		return nil
+	}
+
+	if buckets.list == nil {
+		return nil
+	}
+
+	if len(buckets.list.Items) == 0 {
+		return nil
+	}
+
+	return &buckets.list.Items[buckets.position]
+}
diff --git a/vendor/storj.io/uplink/common.go b/vendor/storj.io/uplink/common.go
new file mode 100644
index 000000000..701737054
--- /dev/null
+++ b/vendor/storj.io/uplink/common.go
@@ -0,0 +1,56 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package uplink
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/spacemonkeygo/monkit/v3"
+	"github.com/zeebo/errs"
+
+	"storj.io/common/errs2"
+	"storj.io/common/rpc/rpcstatus"
+	"storj.io/common/storj"
+)
+
+var mon = monkit.Package()
+
+// packageError is the default error class for uplink.
+var packageError = errs.Class("uplink")
+
+// ErrTooManyRequests is returned when the user has sent too many requests in a given amount of time.
+var ErrTooManyRequests = errors.New("too many requests")
+
+// ErrBandwidthLimitExceeded is returned when the project will exceed its bandwidth limit.
+var ErrBandwidthLimitExceeded = errors.New("bandwidth limit exceeded")
+
+func convertKnownErrors(err error, bucket string) error {
+	if errs2.IsRPC(err, rpcstatus.ResourceExhausted) {
+		// TODO: is there a better way to do this?
+ message := errs.Unwrap(err).Error() + if message == "Exceeded Usage Limit" { + return packageError.Wrap(ErrBandwidthLimitExceeded) + } else if message == "Too Many Requests" { + return packageError.Wrap(ErrTooManyRequests) + } + } else if errs2.IsRPC(err, rpcstatus.NotFound) { + message := errs.Unwrap(err).Error() + if strings.HasPrefix(message, storj.ErrBucketNotFound.New("").Error()) { + return errwrapf("%w (%q)", ErrBucketNotFound, bucket) + } else if strings.HasPrefix(message, storj.ErrObjectNotFound.New("").Error()) { + return packageError.Wrap(ErrObjectNotFound) + } + } + + return packageError.Wrap(err) +} + +func errwrapf(format string, err error, args ...interface{}) error { + var all []interface{} + all = append(all, err) + all = append(all, args...) + return packageError.Wrap(fmt.Errorf(format, all...)) +} diff --git a/vendor/storj.io/uplink/config.go b/vendor/storj.io/uplink/config.go new file mode 100644 index 000000000..848307746 --- /dev/null +++ b/vendor/storj.io/uplink/config.go @@ -0,0 +1,70 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + "time" + + "storj.io/common/identity" + "storj.io/common/macaroon" + "storj.io/common/peertls/tlsopts" + "storj.io/common/rpc" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" +) + +// Config defines configuration for using uplink library. +type Config struct { + UserAgent string + + // DialTimeout defines how long client should wait for establishing + // a connection to peers. 
+ DialTimeout time.Duration +} + +func (config Config) dial(ctx context.Context, satelliteAddress string, apiKey *macaroon.APIKey) (_ *metainfo.Client, _ rpc.Dialer, fullNodeURL string, err error) { + ident, err := identity.NewFullIdentity(ctx, identity.NewCAOptions{ + Difficulty: 0, + Concurrency: 1, + }) + if err != nil { + return nil, rpc.Dialer{}, "", packageError.Wrap(err) + } + + tlsConfig := tlsopts.Config{ + UsePeerCAWhitelist: false, + PeerIDVersions: "0", + } + + tlsOptions, err := tlsopts.NewOptions(ident, tlsConfig, nil) + if err != nil { + return nil, rpc.Dialer{}, "", packageError.Wrap(err) + } + + dialer := rpc.NewDefaultDialer(tlsOptions) + dialer.DialTimeout = config.DialTimeout + + nodeURL, err := storj.ParseNodeURL(satelliteAddress) + if err != nil { + return nil, rpc.Dialer{}, "", packageError.Wrap(err) + } + + // Node id is required in satelliteNodeID for all unknown (non-storj) satellites. + // For known satellite it will be automatically prepended. + if nodeURL.ID.IsZero() { + nodeID, found := rpc.KnownNodeID(nodeURL.Address) + if !found { + return nil, rpc.Dialer{}, "", packageError.New("node id is required in satelliteNodeURL") + } + satelliteAddress = storj.NodeURL{ + ID: nodeID, + Address: nodeURL.Address, + }.String() + } + + metainfo, err := metainfo.DialNodeURL(ctx, dialer, satelliteAddress, apiKey, config.UserAgent) + + return metainfo, dialer, satelliteAddress, packageError.Wrap(err) +} diff --git a/vendor/storj.io/uplink/doc.go b/vendor/storj.io/uplink/doc.go new file mode 100644 index 000000000..30bff47ec --- /dev/null +++ b/vendor/storj.io/uplink/doc.go @@ -0,0 +1,120 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +/* +Package uplink is the main entrypoint to interacting with Storj Labs' decentralized +storage network. + +Sign up for an account on a Satellite today! 
https://tardigrade.io/satellites/
+
+Access Grants
+
+The fundamental unit of access in the Storj Labs storage network is the Access Grant.
+An access grant is a serialized structure that is internally comprised of an API Key,
+a set of encryption key information, and information about which Storj Labs or
+Tardigrade network Satellite is responsible for the metadata. An access grant is
+always associated with exactly one Project on one Satellite.
+
+If you don't already have an access grant, you will need to make an account on a
+Satellite, generate an API Key, and encapsulate that API Key with encryption
+information into an access grant.
+
+If you don't already have an account on a Satellite, first make one at
+https://tardigrade.io/satellites/ and note the Satellite you choose (such as
+us-central-1.tardigrade.io, europe-west-1.tardigrade.io, etc). Then, make an
+API Key in the web interface.
+
+The first step to any project is to generate a restricted access grant with the
+minimal permissions that are needed. Access grants contain all encryption information
+and they should be restricted as much as possible.
+ +To make an access grant, you can create one using our Uplink CLI tool's 'share' +subcommand (after setting up the Uplink CLI tool), or you can make one as follows: + + access, err := uplink.RequestAccessWithPassphrase(ctx, satelliteAddress, apiKey, rootPassphrase) + if err != nil { + return err + } + + // create an access grant for reading bucket "logs" + permissions := uplink.ReadOnlyPermission() + shared := uplink.SharePrefix{Bucket: "logs"} + restrictedAccess, err := access.Share(permissions, shared) + if err != nil { + return err + } + + // serialize the restricted access + serializedAccess, err := restrictedAccess.Serialize() + if err != nil { + return err + } + +In the above example, 'serializedAccess' is a human-readable string that represents +read-only access to just the "logs" bucket, and is only able to decrypt that one +bucket thanks to hierarchical deterministic key derivation. + +Note: RequestAccessWithPassphrase is CPU-intensive, and your application's normal +lifecycle should avoid it and use ParseAccess where possible instead. + +Projects + +Once you have a valid access grant, you can open a Project with the access that +access grant allows for. + + project, err := uplink.OpenProject(ctx, access) + if err != nil { + return err + } + defer project.Close() + + +Projects allow you to manage buckets and objects within buckets. + +Buckets + +A bucket represents a collection of objects. You can upload, download, list, and delete objects of +any size or shape. Objects within buckets are represented by keys, where keys can optionally be +listed using the "/" delimiter. + +Note: Objects and object keys within buckets are end-to-end encrypted, but bucket names +themselves are not encrypted, so the billing interface on the Satellite can show you bucket line +items. 
+ + buckets := project.ListBuckets(ctx, nil) + for buckets.Next() { + fmt.Println(buckets.Item().Name) + } + if err := buckets.Err(); err != nil { + return err + } + +Objects + +Objects support a couple kilobytes of arbitrary key/value metadata, and arbitrary-size primary +data streams with the ability to read at arbitrary offsets. + + object, err := project.DownloadObject(ctx, "logs", "2020-04-18/webserver.log", nil) + if err != nil { + return err + } + defer object.Close() + + _, err = io.Copy(w, object) + return err + +If you want to access only a small subrange of the data you uploaded, you can use +`uplink.DownloadOptions` to specify the download range. + + object, err := project.DownloadObject(ctx, "logs", "2020-04-18/webserver.log", + &uplink.DownloadOptions{Offset: 10, Length: 100}) + if err != nil { + return err + } + defer object.Close() + + _, err = io.Copy(w, object) + return err + +*/ +package uplink diff --git a/vendor/storj.io/uplink/download.go b/vendor/storj.io/uplink/download.go new file mode 100644 index 000000000..8750010e5 --- /dev/null +++ b/vendor/storj.io/uplink/download.go @@ -0,0 +1,81 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + + "storj.io/common/storj" + "storj.io/uplink/private/stream" +) + +// DownloadOptions contains additional options for downloading. +type DownloadOptions struct { + Offset int64 + // When Length is negative it will read until the end of the blob. + Length int64 +} + +// DownloadObject starts a download from the specific key. 
+func (project *Project) DownloadObject(ctx context.Context, bucket, key string, options *DownloadOptions) (download *Download, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + if bucket == "" { + return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + } + if key == "" { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } + + if options == nil { + options = &DownloadOptions{ + Offset: 0, + Length: -1, + } + } + + b := storj.Bucket{Name: bucket} + + obj, err := project.db.GetObject(ctx, b, key) + if err != nil { + if storj.ErrNoPath.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } else if storj.ErrObjectNotFound.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) + } + return nil, convertKnownErrors(err, bucket) + } + + objectStream, err := project.db.GetObjectStream(ctx, b, obj) + if err != nil { + return nil, packageError.Wrap(err) + } + + return &Download{ + download: stream.NewDownloadRange(ctx, objectStream, project.streams, options.Offset, options.Length), + object: convertObject(&obj), + }, nil +} + +// Download is a download from Storj Network. +type Download struct { + download *stream.Download + object *Object +} + +// Info returns the last information about the object. +func (download *Download) Info() *Object { + return download.object +} + +// Read downloads up to len(p) bytes into p from the object's data stream. +// It returns the number of bytes read (0 <= n <= len(p)) and any error encountered. +func (download *Download) Read(data []byte) (n int, err error) { + return download.download.Read(data) +} + +// Close closes the reader of the download. +func (download *Download) Close() error { + return download.download.Close() +} diff --git a/vendor/storj.io/uplink/encryption.go b/vendor/storj.io/uplink/encryption.go new file mode 100644 index 000000000..3dd9ce7c8 --- /dev/null +++ b/vendor/storj.io/uplink/encryption.go @@ -0,0 +1,116 @@ +// Copyright (C) 2020 Storj Labs, Inc. 
+// See LICENSE for copying information. + +package uplink + +import ( + "storj.io/common/encryption" + "storj.io/common/paths" + "storj.io/common/pb" + "storj.io/common/storj" +) + +// encryptionAccess represents an encryption access context. It holds +// information about how various buckets and objects should be +// encrypted and decrypted. +type encryptionAccess struct { + store *encryption.Store +} + +// newEncryptionAccess creates an encryption access context +func newEncryptionAccess() *encryptionAccess { + store := encryption.NewStore() + return &encryptionAccess{store: store} +} + +// newEncryptionAccessWithDefaultKey creates an encryption access context with +// a default key set. +// Use (*Project).SaltedKeyFromPassphrase to generate a default key +func newEncryptionAccessWithDefaultKey(defaultKey *storj.Key) *encryptionAccess { + ec := newEncryptionAccess() + ec.setDefaultKey(defaultKey) + return ec +} + +// Store returns the underlying encryption store for the access context. +func (s *encryptionAccess) Store() *encryption.Store { + return s.store +} + +// setDefaultKey sets the default key for the encryption access context. 
+// Use (*Project).SaltedKeyFromPassphrase to generate a default key +func (s *encryptionAccess) setDefaultKey(defaultKey *storj.Key) { + s.store.SetDefaultKey(defaultKey) +} + +func (s *encryptionAccess) setDefaultPathCipher(defaultPathCipher storj.CipherSuite) { + s.store.SetDefaultPathCipher(defaultPathCipher) +} + +func (s *encryptionAccess) toProto() (*pb.EncryptionAccess, error) { + var storeEntries []*pb.EncryptionAccess_StoreEntry + err := s.store.IterateWithCipher(func(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key, pathCipher storj.CipherSuite) error { + storeEntries = append(storeEntries, &pb.EncryptionAccess_StoreEntry{ + Bucket: []byte(bucket), + UnencryptedPath: []byte(unenc.Raw()), + EncryptedPath: []byte(enc.Raw()), + Key: key[:], + PathCipher: pb.CipherSuite(pathCipher), + }) + return nil + }) + if err != nil { + return nil, packageError.Wrap(err) + } + + var defaultKey []byte + if key := s.store.GetDefaultKey(); key != nil { + defaultKey = key[:] + } + + return &pb.EncryptionAccess{ + DefaultKey: defaultKey, + StoreEntries: storeEntries, + DefaultPathCipher: pb.CipherSuite(s.store.GetDefaultPathCipher()), + }, nil +} + +func parseEncryptionAccessFromProto(p *pb.EncryptionAccess) (*encryptionAccess, error) { + access := newEncryptionAccess() + if len(p.DefaultKey) > 0 { + if len(p.DefaultKey) != len(storj.Key{}) { + return nil, packageError.New("invalid default key in encryption access") + } + var defaultKey storj.Key + copy(defaultKey[:], p.DefaultKey) + access.setDefaultKey(&defaultKey) + } + + access.setDefaultPathCipher(storj.CipherSuite(p.DefaultPathCipher)) + // Unspecified cipher suite means that most probably access was serialized + // before path cipher was moved to encryption access + if p.DefaultPathCipher == pb.CipherSuite_ENC_UNSPECIFIED { + access.setDefaultPathCipher(storj.EncAESGCM) + } + + for _, entry := range p.StoreEntries { + if len(entry.Key) != len(storj.Key{}) { + return nil, 
packageError.New("invalid key in encryption access entry") + } + var key storj.Key + copy(key[:], entry.Key) + + err := access.store.AddWithCipher( + string(entry.Bucket), + paths.NewUnencrypted(string(entry.UnencryptedPath)), + paths.NewEncrypted(string(entry.EncryptedPath)), + key, + storj.CipherSuite(entry.PathCipher), + ) + if err != nil { + return nil, packageError.New("invalid encryption access entry: %v", err) + } + } + + return access, nil +} diff --git a/vendor/storj.io/uplink/go.mod b/vendor/storj.io/uplink/go.mod new file mode 100644 index 000000000..5457898c5 --- /dev/null +++ b/vendor/storj.io/uplink/go.mod @@ -0,0 +1,15 @@ +module storj.io/uplink + +go 1.13 + +require ( + github.com/btcsuite/btcutil v1.0.1 + github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 // indirect + github.com/spacemonkeygo/monkit/v3 v3.0.6 + github.com/stretchr/testify v1.4.0 + github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b + github.com/zeebo/errs v1.2.2 + go.uber.org/zap v1.10.0 + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + storj.io/common v0.0.0-20200429074521-4ba140e4b747 +) diff --git a/vendor/storj.io/uplink/go.sum b/vendor/storj.io/uplink/go.sum new file mode 100644 index 000000000..f7ccbff1e --- /dev/null +++ b/vendor/storj.io/uplink/go.sum @@ -0,0 +1,154 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= 
+github.com/btcsuite/btcutil v1.0.1 h1:GKOz8BnRjYrb/JTKgaOk+zh26NWNdSNvdvv0xoAZMSA= +github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/calebcase/tmpfile v1.0.1 h1:vD8FSrbsbexhep39/6mvtbIHS3GzIRqiprDNCF6QqSk= +github.com/calebcase/tmpfile v1.0.1/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= +github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5 h1:l16XLUUJ34wIz+RIvLhSwGvLvKyy+W598b135bJN6mg= +github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 h1:xHQewZjohU9/wUsyC99navCjQDNHtTgUOM/J1jAbzfw= +github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1/go.mod h1:7NL9UAYQnRM5iKHUCld3tf02fKb5Dft+41+VckASUy0= +github.com/spacemonkeygo/monkit/v3 v3.0.0-20191108235033-eacca33b3037/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.4 h1:Ay+PZirv+qfd4sqcT+X/U3BnC7AcIaqp/IXh0oV36k8= +github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.5 h1:vMW8Ne6WAUU/OMYaSv7KGW9h/sRNgeh6TyBapOSuMhM= +github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes= +github.com/spacemonkeygo/monkit/v3 v3.0.6 h1:BKPrEaLokVAxlwHkD7jawViBa/IU9/bgXbZLWgjbdSM= +github.com/spacemonkeygo/monkit/v3 v3.0.6/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4= +github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA= +github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b h1:dLkqBELopfQNhe8S9ucnSf+HhiUCgK/hPIjVG0f9GlY= +github.com/vivint/infectious v0.0.0-20190108171102-2455b059135b/go.mod h1:5oyMAv4hrBEKqBwORFsiqIrCNCmL2qcZLQTdJLYeYIc= +github.com/zeebo/admission/v2 v2.0.0 h1:220NPZzKmyfklysKFO95L7E2Gt5NwlxTWGE14VP8heE= +github.com/zeebo/admission/v2 v2.0.0/go.mod h1:gSeHGelDHW7Vq6UyJo2boeSt/6Dsnqpisv0i4YZSOyM= +github.com/zeebo/admission/v3 v3.0.1 h1:/IWg2jLhfjBOUhhdKcbweSzcY3QlbbE57sqvU72EpqA= +github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw= +github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= +github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= +github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc= +github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo= +github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ= +github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf h1:9cZxTVBvFZgOnVi/DobY3JsafbPFPnP2rtN81d4wPpw= +golang.org/x/sys v0.0.0-20200107144601-ef85f5a75ddf/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +storj.io/common v0.0.0-20200422101411-db8726ab073a h1:gKPJnNWeBL49/NNOglQL2bLt/JyMvSAow8gh5e+aDGk= +storj.io/common v0.0.0-20200422101411-db8726ab073a/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= +storj.io/common v0.0.0-20200423123959-c1b3f92807ea h1:TV7/Do6oYxdVOnp43nPyFmxxKHbWobjDc6k42zXvrx0= +storj.io/common 
v0.0.0-20200423123959-c1b3f92807ea/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= +storj.io/common v0.0.0-20200424175742-65ac59022f4f h1:HPRWr2HQzPD12vhHIaYhh9HT0vYlULkqA453YEV/BXU= +storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0= +storj.io/common v0.0.0-20200429074521-4ba140e4b747 h1:Ne1x0M80uNyN6tHIs15CGJqHbreKbvH5BOq4jdWsqMc= +storj.io/common v0.0.0-20200429074521-4ba140e4b747/go.mod h1:lfsaMdtHwrUOtSYkw73meCyZMUYiaFBKVqx6zgeSz2o= +storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw= +storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= diff --git a/vendor/storj.io/uplink/internal/expose/exposed.go b/vendor/storj.io/uplink/internal/expose/exposed.go new file mode 100644 index 000000000..46a6e894c --- /dev/null +++ b/vendor/storj.io/uplink/internal/expose/exposed.go @@ -0,0 +1,14 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package expose + +// RequestAccessWithPassphraseAndConcurrency exposes uplink.requestAccessWithPassphraseAndConcurrency +// +// func RequestAccessWithPassphraseAndConcurrency(ctx context.Context, config uplink.Config, satelliteNodeURL, apiKey, passphrase string, concurrency uint8) (_ *uplink.Access, err error) +var RequestAccessWithPassphraseAndConcurrency interface{} + +// EnablePathEncryptionBypass exposes uplink.enablePathEncryptionBypass +// +// func EnablePathEncryptionBypass(access *Access) error +var EnablePathEncryptionBypass interface{} diff --git a/vendor/storj.io/uplink/internal/telemetryclient/client.go b/vendor/storj.io/uplink/internal/telemetryclient/client.go new file mode 100644 index 000000000..1aeb759a3 --- /dev/null +++ b/vendor/storj.io/uplink/internal/telemetryclient/client.go @@ -0,0 +1,42 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +// Package telemetryclient is internal package to support telemetry +// without introducing a direct dependency to the actual implementation. +package telemetryclient + +import ( + "context" + + "go.uber.org/zap" +) + +type contextKey int + +const constructorKey contextKey = iota + +// Constructor creates a new telemetry client. +type Constructor func(log *zap.Logger, satelliteAddress string) (Client, error) + +// Client is the common interface for telemetry. +type Client interface { + Run(ctx context.Context) + Stop() + Report(ctx context.Context) error +} + +// WithConstructor specifies which telemetry to use. +func WithConstructor(ctx context.Context, ctor Constructor) context.Context { + return context.WithValue(ctx, constructorKey, ctor) +} + +// ConstructorFrom loads the telemetry client constructor from context. +func ConstructorFrom(ctx context.Context) (_ Constructor, ok bool) { + v := ctx.Value(constructorKey) + if v == nil { + return nil, false + } + + ctor, ok := v.(Constructor) + return ctor, ok +} diff --git a/vendor/storj.io/uplink/object.go b/vendor/storj.io/uplink/object.go new file mode 100644 index 000000000..d8dcdc67f --- /dev/null +++ b/vendor/storj.io/uplink/object.go @@ -0,0 +1,131 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + "unicode/utf8" + + "github.com/zeebo/errs" + + "storj.io/common/storj" +) + +// ErrObjectKeyInvalid is returned when the object key is invalid. +var ErrObjectKeyInvalid = errors.New("object key invalid") + +// ErrObjectNotFound is returned when the object is not found. +var ErrObjectNotFound = errors.New("object not found") + +// Object contains information about an object. +type Object struct { + Key string + // IsPrefix indicates whether the Key is a prefix for other objects. 
+ IsPrefix bool + + System SystemMetadata + Custom CustomMetadata +} + +// SystemMetadata contains information about the object that cannot be changed directly. +type SystemMetadata struct { + Created time.Time + Expires time.Time + ContentLength int64 +} + +// CustomMetadata contains custom user metadata about the object. +// +// The keys and values in custom metadata are expected to be valid UTF-8. +// +// When choosing a custom key for your application start it with a prefix "app:key", +// as an example application named "Image Board" might use a key "image-board:title". +type CustomMetadata map[string]string + +// Clone makes a deep clone. +func (meta CustomMetadata) Clone() CustomMetadata { + r := CustomMetadata{} + for k, v := range meta { + r[k] = v + } + return r +} + +// Verify verifies whether CustomMetadata contains only "utf-8". +func (meta CustomMetadata) Verify() error { + var invalid []string + for k, v := range meta { + if !utf8.ValidString(k) || !utf8.ValidString(v) { + invalid = append(invalid, fmt.Sprintf("not utf-8 %q=%q", k, v)) + } + if strings.IndexByte(k, 0) >= 0 || strings.IndexByte(v, 0) >= 0 { + invalid = append(invalid, fmt.Sprintf("contains 0 byte: %q=%q", k, v)) + } + if k == "" { + invalid = append(invalid, "empty key") + } + } + + if len(invalid) > 0 { + return errs.New("invalid pairs %v", invalid) + } + + return nil +} + +// StatObject returns information about an object at the specific key. 
+func (project *Project) StatObject(ctx context.Context, bucket, key string) (info *Object, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + b := storj.Bucket{Name: bucket} + obj, err := project.db.GetObject(ctx, b, key) + if err != nil { + if storj.ErrNoPath.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } else if storj.ErrObjectNotFound.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) + } + return nil, convertKnownErrors(err, bucket) + } + + return convertObject(&obj), nil +} + +// DeleteObject deletes the object at the specific key. +func (project *Project) DeleteObject(ctx context.Context, bucket, key string) (deleted *Object, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + b := storj.Bucket{Name: bucket} + obj, err := project.db.DeleteObject(ctx, b, key) + if err != nil { + if storj.ErrNoPath.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } else if storj.ErrObjectNotFound.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectNotFound, key) + } + return nil, convertKnownErrors(err, bucket) + } + return convertObject(&obj), nil +} + +// convertObject converts storj.Object to uplink.Object. +func convertObject(obj *storj.Object) *Object { + if obj.Bucket.Name == "" { // zero object + return nil + } + + return &Object{ + Key: obj.Path, + System: SystemMetadata{ + Created: obj.Created, + Expires: obj.Expires, + ContentLength: obj.Size, + }, + Custom: obj.Metadata, + } +} diff --git a/vendor/storj.io/uplink/objects.go b/vendor/storj.io/uplink/objects.go new file mode 100644 index 000000000..cfae127d3 --- /dev/null +++ b/vendor/storj.io/uplink/objects.go @@ -0,0 +1,169 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + + "storj.io/common/storj" +) + +// ListObjectsOptions defines object listing options. +type ListObjectsOptions struct { + // Prefix allows to filter objects by a key prefix. 
If not empty, it must end with slash. + Prefix string + // Cursor sets the starting position of the iterator. The first item listed will be the one after the cursor. + Cursor string + // Recursive iterates the objects without collapsing prefixes. + Recursive bool + + // System includes SystemMetadata in the results. + System bool + // Custom includes CustomMetadata in the results. + Custom bool +} + +// ListObjects returns an iterator over the objects. +func (project *Project) ListObjects(ctx context.Context, bucket string, options *ListObjectsOptions) *ObjectIterator { + defer mon.Func().ResetTrace(&ctx)(nil) + + b := storj.Bucket{Name: bucket, PathCipher: storj.EncAESGCM} + opts := storj.ListOptions{ + Direction: storj.After, + } + + if options != nil { + opts.Prefix = options.Prefix + opts.Cursor = options.Cursor + opts.Recursive = options.Recursive + } + + objects := ObjectIterator{ + ctx: ctx, + project: project, + bucket: b, + options: opts, + } + + if options != nil { + objects.objOptions = *options + } + + return &objects +} + +// ObjectIterator is an iterator over a collection of objects or prefixes. +type ObjectIterator struct { + ctx context.Context + project *Project + bucket storj.Bucket + options storj.ListOptions + objOptions ListObjectsOptions + list *storj.ObjectList + position int + completed bool + err error +} + +// Next prepares next Object for reading. +// It returns false if the end of the iteration is reached and there are no more objects, or if there is an error. 
+func (objects *ObjectIterator) Next() bool { + if objects.err != nil { + objects.completed = true + return false + } + + if objects.list == nil { + more := objects.loadNext() + objects.completed = !more + return more + } + + if objects.position >= len(objects.list.Items)-1 { + if !objects.list.More { + objects.completed = true + return false + } + more := objects.loadNext() + objects.completed = !more + return more + } + + objects.position++ + + return true +} + +func (objects *ObjectIterator) loadNext() bool { + list, err := objects.project.db.ListObjectsExtended(objects.ctx, objects.bucket, objects.options) + if err != nil { + objects.err = convertKnownErrors(err, objects.bucket.Name) + return false + } + objects.list = &list + if list.More { + objects.options = objects.options.NextPage(list) + } + objects.position = 0 + return len(list.Items) > 0 +} + +// Err returns error, if one happened during iteration. +func (objects *ObjectIterator) Err() error { + return packageError.Wrap(objects.err) +} + +// Item returns the current object in the iterator. 
+func (objects *ObjectIterator) Item() *Object { + item := objects.item() + if item == nil { + return nil + } + + key := item.Path + if len(objects.options.Prefix) > 0 { + key = objects.options.Prefix + item.Path + } + + obj := Object{ + Key: key, + IsPrefix: item.IsPrefix, + } + + // TODO: Make this filtering on the satellite + if objects.objOptions.System { + obj.System = SystemMetadata{ + Created: item.Created, + Expires: item.Expires, + ContentLength: item.Size, + } + } + + // TODO: Make this filtering on the satellite + if objects.objOptions.Custom { + obj.Custom = item.Metadata + } + + return &obj +} + +func (objects *ObjectIterator) item() *storj.Object { + if objects.completed { + return nil + } + + if objects.err != nil { + return nil + } + + if objects.list == nil { + return nil + } + + if len(objects.list.Items) == 0 { + return nil + } + + return &objects.list.Items[objects.position] +} diff --git a/vendor/storj.io/uplink/private/ecclient/client.go b/vendor/storj.io/uplink/private/ecclient/client.go new file mode 100644 index 000000000..870692dc9 --- /dev/null +++ b/vendor/storj.io/uplink/private/ecclient/client.go @@ -0,0 +1,418 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package ecclient + +import ( + "context" + "io" + "io/ioutil" + "sort" + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + "go.uber.org/zap" + + "storj.io/common/encryption" + "storj.io/common/errs2" + "storj.io/common/identity" + "storj.io/common/pb" + "storj.io/common/ranger" + "storj.io/common/rpc" + "storj.io/common/storj" + "storj.io/uplink/private/eestream" + "storj.io/uplink/private/piecestore" +) + +var mon = monkit.Package() + +// Client defines an interface for storing erasure coded data to piece store nodes +type Client interface { + Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) + Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (ranger.Ranger, error) + WithForceErrorDetection(force bool) Client + // PutPiece is not intended to be used by normal uplinks directly, but is exported to support storagenode graceful exit transfers. 
+ PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, id *identity.PeerIdentity, err error) +} + +type dialPiecestoreFunc func(context.Context, *pb.Node) (*piecestore.Client, error) + +type ecClient struct { + log *zap.Logger + dialer rpc.Dialer + memoryLimit int + forceErrorDetection bool +} + +// NewClient from the given identity and max buffer memory +func NewClient(log *zap.Logger, dialer rpc.Dialer, memoryLimit int) Client { + return &ecClient{ + log: log, + dialer: dialer, + memoryLimit: memoryLimit, + } +} + +func (ec *ecClient) WithForceErrorDetection(force bool) Client { + ec.forceErrorDetection = force + return ec +} + +func (ec *ecClient) dialPiecestore(ctx context.Context, n *pb.Node) (*piecestore.Client, error) { + logger := ec.log.Named(n.Id.String()) + return piecestore.Dial(ctx, ec.dialer, n, logger, piecestore.DefaultConfig) +} + +func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) { + defer mon.Task()(&ctx)(&err) + + pieceCount := len(limits) + if pieceCount != rs.TotalCount() { + return nil, nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", pieceCount, rs.TotalCount()) + } + + nonNilLimits := nonNilCount(limits) + if nonNilLimits <= rs.RepairThreshold() && nonNilLimits < rs.OptimalThreshold() { + return nil, nil, Error.New("number of non-nil limits (%d) is less than or equal to the repair threshold (%d) of erasure scheme", nonNilLimits, rs.RepairThreshold()) + } + + if !unique(limits) { + return nil, nil, Error.New("duplicated nodes are not allowed") + } + + ec.log.Debug("Uploading to storage nodes", + zap.Int("Erasure Share Size", rs.ErasureShareSize()), + zap.Int("Stripe Size", rs.StripeSize()), + 
zap.Int("Repair Threshold", rs.RepairThreshold()), + zap.Int("Optimal Threshold", rs.OptimalThreshold()), + ) + + padded := encryption.PadReader(ioutil.NopCloser(data), rs.StripeSize()) + readers, err := eestream.EncodeReader(ctx, ec.log, padded, rs) + if err != nil { + return nil, nil, err + } + + type info struct { + i int + err error + hash *pb.PieceHash + } + infos := make(chan info, pieceCount) + + psCtx, cancel := context.WithCancel(ctx) + defer cancel() + + for i, addressedLimit := range limits { + go func(i int, addressedLimit *pb.AddressedOrderLimit) { + hash, _, err := ec.PutPiece(psCtx, ctx, addressedLimit, privateKey, readers[i]) + infos <- info{i: i, err: err, hash: hash} + }(i, addressedLimit) + } + + successfulNodes = make([]*pb.Node, pieceCount) + successfulHashes = make([]*pb.PieceHash, pieceCount) + var successfulCount, failureCount, cancellationCount int32 + for range limits { + info := <-infos + + if limits[info.i] == nil { + continue + } + + if info.err != nil { + if !errs2.IsCanceled(info.err) { + failureCount++ + } else { + cancellationCount++ + } + ec.log.Debug("Upload to storage node failed", + zap.Stringer("Node ID", limits[info.i].GetLimit().StorageNodeId), + zap.Error(info.err), + ) + continue + } + + successfulNodes[info.i] = &pb.Node{ + Id: limits[info.i].GetLimit().StorageNodeId, + Address: limits[info.i].GetStorageNodeAddress(), + } + successfulHashes[info.i] = info.hash + + successfulCount++ + if int(successfulCount) >= rs.OptimalThreshold() { + ec.log.Debug("Success threshold reached. 
Cancelling remaining uploads.", + zap.Int("Optimal Threshold", rs.OptimalThreshold()), + ) + cancel() + } + } + + defer func() { + select { + case <-ctx.Done(): + err = Error.New("upload cancelled by user") + default: + } + }() + + mon.IntVal("put_segment_pieces_total").Observe(int64(pieceCount)) + mon.IntVal("put_segment_pieces_optimal").Observe(int64(rs.OptimalThreshold())) + mon.IntVal("put_segment_pieces_successful").Observe(int64(successfulCount)) + mon.IntVal("put_segment_pieces_failed").Observe(int64(failureCount)) + mon.IntVal("put_segment_pieces_canceled").Observe(int64(cancellationCount)) + + if int(successfulCount) <= rs.RepairThreshold() && int(successfulCount) < rs.OptimalThreshold() { + return nil, nil, Error.New("successful puts (%d) less than or equal to repair threshold (%d)", successfulCount, rs.RepairThreshold()) + } + + if int(successfulCount) < rs.OptimalThreshold() { + return nil, nil, Error.New("successful puts (%d) less than success threshold (%d)", successfulCount, rs.OptimalThreshold()) + } + + return successfulNodes, successfulHashes, nil +} + +func (ec *ecClient) PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, peerID *identity.PeerIdentity, err error) { + nodeName := "nil" + if limit != nil { + nodeName = limit.GetLimit().StorageNodeId.String()[0:8] + } + defer mon.Task()(&ctx, "node: "+nodeName)(&err) + defer func() { err = errs.Combine(err, data.Close()) }() + + if limit == nil { + _, _ = io.Copy(ioutil.Discard, data) + return nil, nil, nil + } + + storageNodeID := limit.GetLimit().StorageNodeId + pieceID := limit.GetLimit().PieceId + ps, err := ec.dialPiecestore(ctx, &pb.Node{ + Id: storageNodeID, + Address: limit.GetStorageNodeAddress(), + }) + if err != nil { + ec.log.Debug("Failed dialing for putting piece to node", + zap.Stringer("Piece ID", pieceID), + zap.Stringer("Node ID", storageNodeID), + zap.Error(err), + ) + return nil, nil, err 
+ } + defer func() { err = errs.Combine(err, ps.Close()) }() + + peerID, err = ps.GetPeerIdentity() + if err != nil { + ec.log.Debug("Failed getting peer identity from node connection", + zap.Stringer("Node ID", storageNodeID), + zap.Error(err), + ) + return nil, nil, err + } + + hash, err = ps.UploadReader(ctx, limit.GetLimit(), privateKey, data) + if err != nil { + if ctx.Err() == context.Canceled { + // Canceled context means the piece upload was interrupted by user or due + // to slow connection. No error logging for this case. + if parent.Err() == context.Canceled { + ec.log.Info("Upload to node canceled by user", zap.Stringer("Node ID", storageNodeID)) + } else { + ec.log.Debug("Node cut from upload due to slow connection", zap.Stringer("Node ID", storageNodeID)) + } + + // make sure context.Canceled is the primary error in the error chain + // for later errors.Is/errs2.IsCanceled checking + err = errs.Combine(context.Canceled, err) + + } else { + nodeAddress := "" + if limit.GetStorageNodeAddress() != nil { + nodeAddress = limit.GetStorageNodeAddress().GetAddress() + } + + ec.log.Debug("Failed uploading piece to node", + zap.Stringer("Piece ID", pieceID), + zap.Stringer("Node ID", storageNodeID), + zap.String("Node Address", nodeAddress), + zap.Error(err), + ) + } + + return nil, nil, err + } + + return hash, peerID, nil +} + +func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (rr ranger.Ranger, err error) { + defer mon.Task()(&ctx)(&err) + + if len(limits) != es.TotalCount() { + return nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", len(limits), es.TotalCount()) + } + + if nonNilCount(limits) < es.RequiredCount() { + return nil, Error.New("number of non-nil limits (%d) is less than required count (%d) of erasure scheme", nonNilCount(limits), es.RequiredCount()) + } + + paddedSize := calcPadded(size, 
es.StripeSize()) + pieceSize := paddedSize / int64(es.RequiredCount()) + + rrs := map[int]ranger.Ranger{} + for i, addressedLimit := range limits { + if addressedLimit == nil { + continue + } + + rrs[i] = &lazyPieceRanger{ + dialPiecestore: ec.dialPiecestore, + limit: addressedLimit, + privateKey: privateKey, + size: pieceSize, + } + } + + rr, err = eestream.Decode(ec.log, rrs, es, ec.memoryLimit, ec.forceErrorDetection) + if err != nil { + return nil, Error.Wrap(err) + } + + ranger, err := encryption.Unpad(rr, int(paddedSize-size)) + return ranger, Error.Wrap(err) +} + +func unique(limits []*pb.AddressedOrderLimit) bool { + if len(limits) < 2 { + return true + } + ids := make(storj.NodeIDList, len(limits)) + for i, addressedLimit := range limits { + if addressedLimit != nil { + ids[i] = addressedLimit.GetLimit().StorageNodeId + } + } + + // sort the ids and check for identical neighbors + sort.Sort(ids) + // sort.Slice(ids, func(i, k int) bool { return ids[i].Less(ids[k]) }) + for i := 1; i < len(ids); i++ { + if ids[i] != (storj.NodeID{}) && ids[i] == ids[i-1] { + return false + } + } + + return true +} + +func calcPadded(size int64, blockSize int) int64 { + mod := size % int64(blockSize) + if mod == 0 { + return size + } + return size + int64(blockSize) - mod +} + +type lazyPieceRanger struct { + dialPiecestore dialPiecestoreFunc + limit *pb.AddressedOrderLimit + privateKey storj.PiecePrivateKey + size int64 +} + +// Size implements Ranger.Size +func (lr *lazyPieceRanger) Size() int64 { + return lr.size +} + +// Range implements Ranger.Range to be lazily connected +func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + + return &lazyPieceReader{ + ranger: lr, + ctx: ctx, + offset: offset, + length: length, + }, nil +} + +type lazyPieceReader struct { + ranger *lazyPieceRanger + ctx context.Context + offset int64 + length int64 + + mu sync.Mutex + + isClosed bool + 
piecestore.Downloader + client *piecestore.Client +} + +func (lr *lazyPieceReader) Read(data []byte) (_ int, err error) { + lr.mu.Lock() + defer lr.mu.Unlock() + + if lr.isClosed { + return 0, io.EOF + } + if lr.Downloader == nil { + client, downloader, err := lr.ranger.dial(lr.ctx, lr.offset, lr.length) + if err != nil { + return 0, err + } + lr.Downloader = downloader + lr.client = client + } + + return lr.Downloader.Read(data) +} + +func (lr *lazyPieceRanger) dial(ctx context.Context, offset, length int64) (_ *piecestore.Client, _ piecestore.Downloader, err error) { + defer mon.Task()(&ctx)(&err) + ps, err := lr.dialPiecestore(ctx, &pb.Node{ + Id: lr.limit.GetLimit().StorageNodeId, + Address: lr.limit.GetStorageNodeAddress(), + }) + if err != nil { + return nil, nil, err + } + + download, err := ps.Download(ctx, lr.limit.GetLimit(), lr.privateKey, offset, length) + if err != nil { + return nil, nil, errs.Combine(err, ps.Close()) + } + return ps, download, nil +} + +func (lr *lazyPieceReader) Close() (err error) { + lr.mu.Lock() + defer lr.mu.Unlock() + + if lr.isClosed { + return nil + } + lr.isClosed = true + + if lr.Downloader != nil { + err = errs.Combine(err, lr.Downloader.Close()) + } + if lr.client != nil { + err = errs.Combine(err, lr.client.Close()) + } + return err +} + +func nonNilCount(limits []*pb.AddressedOrderLimit) int { + total := 0 + for _, limit := range limits { + if limit != nil { + total++ + } + } + return total +} diff --git a/vendor/storj.io/uplink/private/ecclient/common.go b/vendor/storj.io/uplink/private/ecclient/common.go new file mode 100644 index 000000000..7d734b05e --- /dev/null +++ b/vendor/storj.io/uplink/private/ecclient/common.go @@ -0,0 +1,11 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package ecclient + +import ( + "github.com/zeebo/errs" +) + +// Error is the errs class of standard Ranger errors +var Error = errs.Class("ecclient error") diff --git a/vendor/storj.io/uplink/private/eestream/common.go b/vendor/storj.io/uplink/private/eestream/common.go new file mode 100644 index 000000000..3c2a26201 --- /dev/null +++ b/vendor/storj.io/uplink/private/eestream/common.go @@ -0,0 +1,11 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "github.com/zeebo/errs" +) + +// Error is the default eestream errs class +var Error = errs.Class("eestream error") diff --git a/vendor/storj.io/uplink/private/eestream/decode.go b/vendor/storj.io/uplink/private/eestream/decode.go new file mode 100644 index 000000000..cc871d562 --- /dev/null +++ b/vendor/storj.io/uplink/private/eestream/decode.go @@ -0,0 +1,230 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "context" + "io" + "io/ioutil" + "sync" + + "github.com/zeebo/errs" + "go.uber.org/zap" + + "storj.io/common/encryption" + "storj.io/common/errs2" + "storj.io/common/ranger" + "storj.io/common/readcloser" +) + +type decodedReader struct { + log *zap.Logger + ctx context.Context + cancel context.CancelFunc + readers map[int]io.ReadCloser + scheme ErasureScheme + stripeReader *StripeReader + outbuf []byte + err error + currentStripe int64 + expectedStripes int64 + close sync.Once + closeErr error +} + +// DecodeReaders takes a map of readers and an ErasureScheme returning a +// combined Reader. +// +// rs is a map of erasure piece numbers to erasure piece streams. +// expectedSize is the number of bytes expected to be returned by the Reader. +// mbm is the maximum memory (in bytes) to be allocated for read buffers. If +// set to 0, the minimum possible memory will be used. 
+// if forceErrorDetection is set to true then k+1 pieces will be always +// required for decoding, so corrupted pieces can be detected. +func DecodeReaders(ctx context.Context, cancel func(), log *zap.Logger, rs map[int]io.ReadCloser, es ErasureScheme, expectedSize int64, mbm int, forceErrorDetection bool) io.ReadCloser { + defer mon.Task()(&ctx)(nil) + if expectedSize < 0 { + return readcloser.FatalReadCloser(Error.New("negative expected size")) + } + if expectedSize%int64(es.StripeSize()) != 0 { + return readcloser.FatalReadCloser( + Error.New("expected size (%d) not a factor decoded block size (%d)", + expectedSize, es.StripeSize())) + } + if err := checkMBM(mbm); err != nil { + return readcloser.FatalReadCloser(err) + } + dr := &decodedReader{ + log: log, + readers: rs, + scheme: es, + stripeReader: NewStripeReader(log, rs, es, mbm, forceErrorDetection), + outbuf: make([]byte, 0, es.StripeSize()), + expectedStripes: expectedSize / int64(es.StripeSize()), + } + dr.ctx, dr.cancel = ctx, cancel + // Kick off a goroutine to watch for context cancelation. 
+ go func() { + <-dr.ctx.Done() + _ = dr.Close() + }() + return dr +} + +func (dr *decodedReader) Read(p []byte) (n int, err error) { + ctx := dr.ctx + + if len(dr.outbuf) == 0 { + // if the output buffer is empty, let's fill it again + // if we've already had an error, fail + if dr.err != nil { + return 0, dr.err + } + // return EOF is the expected stripes were read + if dr.currentStripe >= dr.expectedStripes { + dr.err = io.EOF + return 0, dr.err + } + // read the input buffers of the next stripe - may also decode it + dr.outbuf, dr.err = dr.stripeReader.ReadStripe(ctx, dr.currentStripe, dr.outbuf) + if dr.err != nil { + return 0, dr.err + } + dr.currentStripe++ + } + + // copy what data we have to the output + n = copy(p, dr.outbuf) + // slide the remaining bytes to the beginning + copy(dr.outbuf, dr.outbuf[n:]) + // shrink the remaining buffer + dr.outbuf = dr.outbuf[:len(dr.outbuf)-n] + return n, nil +} + +func (dr *decodedReader) Close() (err error) { + ctx := dr.ctx + defer mon.Task()(&ctx)(&err) + // cancel the context to terminate reader goroutines + dr.cancel() + errorThreshold := len(dr.readers) - dr.scheme.RequiredCount() + var closeGroup errs2.Group + // avoid double close of readers + dr.close.Do(func() { + for _, r := range dr.readers { + r := r + closeGroup.Go(func() error { + return errs2.IgnoreCanceled(r.Close()) + }) + } + + // close the stripe reader + closeGroup.Go(dr.stripeReader.Close) + + allErrors := closeGroup.Wait() + errorThreshold -= len(allErrors) + dr.closeErr = errs.Combine(allErrors...) 
+ }) + // TODO this is workaround, we need reorganize to return multiple errors or divide into fatal, non fatal + if errorThreshold < 0 { + return dr.closeErr + } + if dr.closeErr != nil { + dr.log.Debug("decode close non fatal error: ", zap.Error(dr.closeErr)) + } + return nil +} + +type decodedRanger struct { + log *zap.Logger + es ErasureScheme + rrs map[int]ranger.Ranger + inSize int64 + mbm int // max buffer memory + forceErrorDetection bool +} + +// Decode takes a map of Rangers and an ErasureScheme and returns a combined +// Ranger. +// +// rrs is a map of erasure piece numbers to erasure piece rangers. +// mbm is the maximum memory (in bytes) to be allocated for read buffers. If +// set to 0, the minimum possible memory will be used. +// if forceErrorDetection is set to true then k+1 pieces will be always +// required for decoding, so corrupted pieces can be detected. +func Decode(log *zap.Logger, rrs map[int]ranger.Ranger, es ErasureScheme, mbm int, forceErrorDetection bool) (ranger.Ranger, error) { + if err := checkMBM(mbm); err != nil { + return nil, err + } + if len(rrs) < es.RequiredCount() { + return nil, Error.New("not enough readers to reconstruct data!") + } + size := int64(-1) + for _, rr := range rrs { + if size == -1 { + size = rr.Size() + } else if size != rr.Size() { + return nil, Error.New( + "decode failure: range reader sizes don't all match") + } + } + if size == -1 { + return ranger.ByteRanger(nil), nil + } + if size%int64(es.ErasureShareSize()) != 0 { + return nil, Error.New("invalid erasure decoder and range reader combo. 
"+ + "range reader size (%d) must be a multiple of erasure encoder block size (%d)", + size, es.ErasureShareSize()) + } + return &decodedRanger{ + log: log, + es: es, + rrs: rrs, + inSize: size, + mbm: mbm, + forceErrorDetection: forceErrorDetection, + }, nil +} + +func (dr *decodedRanger) Size() int64 { + blocks := dr.inSize / int64(dr.es.ErasureShareSize()) + return blocks * int64(dr.es.StripeSize()) +} + +func (dr *decodedRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + + ctx, cancel := context.WithCancel(ctx) + // offset and length might not be block-aligned. figure out which + // blocks contain this request + firstBlock, blockCount := encryption.CalcEncompassingBlocks(offset, length, dr.es.StripeSize()) + + // go ask for ranges for all those block boundaries + readers := make(map[int]io.ReadCloser, len(dr.rrs)) + for i, rr := range dr.rrs { + r, err := rr.Range(ctx, firstBlock*int64(dr.es.ErasureShareSize()), blockCount*int64(dr.es.ErasureShareSize())) + if err != nil { + readers[i] = readcloser.FatalReadCloser(err) + } else { + readers[i] = r + } + } + + // decode from all those ranges + r := DecodeReaders(ctx, cancel, dr.log, readers, dr.es, blockCount*int64(dr.es.StripeSize()), dr.mbm, dr.forceErrorDetection) + // offset might start a few bytes in, potentially discard the initial bytes + _, err = io.CopyN(ioutil.Discard, r, offset-firstBlock*int64(dr.es.StripeSize())) + if err != nil { + return nil, Error.Wrap(err) + } + // length might not have included all of the blocks, limit what we return + return readcloser.LimitReadCloser(r, length), nil +} + +func checkMBM(mbm int) error { + if mbm < 0 { + return Error.New("negative max buffer memory") + } + return nil +} diff --git a/vendor/storj.io/uplink/private/eestream/encode.go b/vendor/storj.io/uplink/private/eestream/encode.go new file mode 100644 index 000000000..421d66752 --- /dev/null +++ 
b/vendor/storj.io/uplink/private/eestream/encode.go @@ -0,0 +1,318 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "context" + "io" + "io/ioutil" + "os" + + "github.com/vivint/infectious" + "go.uber.org/zap" + + "storj.io/common/encryption" + "storj.io/common/fpath" + "storj.io/common/memory" + "storj.io/common/pb" + "storj.io/common/ranger" + "storj.io/common/readcloser" + "storj.io/common/storj" + "storj.io/common/sync2" +) + +// ErasureScheme represents the general format of any erasure scheme algorithm. +// If this interface can be implemented, the rest of this library will work +// with it. +type ErasureScheme interface { + // Encode will take 'in' and call 'out' with erasure coded pieces. + Encode(in []byte, out func(num int, data []byte)) error + + // EncodeSingle will take 'in' with the stripe and fill 'out' with the erasure share for piece 'num'. + EncodeSingle(in, out []byte, num int) error + + // Decode will take a mapping of available erasure coded piece num -> data, + // 'in', and append the combined data to 'out', returning it. + Decode(out []byte, in map[int][]byte) ([]byte, error) + + // ErasureShareSize is the size of the erasure shares that come from Encode + // and are passed to Decode. + ErasureShareSize() int + + // StripeSize is the size the stripes that are passed to Encode and come + // from Decode. + StripeSize() int + + // Encode will generate this many erasure shares and therefore this many pieces + TotalCount() int + + // Decode requires at least this many pieces + RequiredCount() int +} + +// RedundancyStrategy is an ErasureScheme with a repair and optimal thresholds +type RedundancyStrategy struct { + ErasureScheme + repairThreshold int + optimalThreshold int +} + +// NewRedundancyStrategy from the given ErasureScheme, repair and optimal thresholds. +// +// repairThreshold is the minimum repair threshold. 
+// If set to 0, it will be reset to the TotalCount of the ErasureScheme. +// optimalThreshold is the optimal threshold. +// If set to 0, it will be reset to the TotalCount of the ErasureScheme. +func NewRedundancyStrategy(es ErasureScheme, repairThreshold, optimalThreshold int) (RedundancyStrategy, error) { + if repairThreshold == 0 { + repairThreshold = es.TotalCount() + } + + if optimalThreshold == 0 { + optimalThreshold = es.TotalCount() + } + if repairThreshold < 0 { + return RedundancyStrategy{}, Error.New("negative repair threshold") + } + if repairThreshold > 0 && repairThreshold < es.RequiredCount() { + return RedundancyStrategy{}, Error.New("repair threshold less than required count") + } + if repairThreshold > es.TotalCount() { + return RedundancyStrategy{}, Error.New("repair threshold greater than total count") + } + if optimalThreshold < 0 { + return RedundancyStrategy{}, Error.New("negative optimal threshold") + } + if optimalThreshold > 0 && optimalThreshold < es.RequiredCount() { + return RedundancyStrategy{}, Error.New("optimal threshold less than required count") + } + if optimalThreshold > es.TotalCount() { + return RedundancyStrategy{}, Error.New("optimal threshold greater than total count") + } + if repairThreshold > optimalThreshold { + return RedundancyStrategy{}, Error.New("repair threshold greater than optimal threshold") + } + return RedundancyStrategy{ErasureScheme: es, repairThreshold: repairThreshold, optimalThreshold: optimalThreshold}, nil +} + +// NewRedundancyStrategyFromProto creates new RedundancyStrategy from the given +// RedundancyScheme protobuf. 
+func NewRedundancyStrategyFromProto(scheme *pb.RedundancyScheme) (RedundancyStrategy, error) { + fc, err := infectious.NewFEC(int(scheme.GetMinReq()), int(scheme.GetTotal())) + if err != nil { + return RedundancyStrategy{}, Error.Wrap(err) + } + es := NewRSScheme(fc, int(scheme.GetErasureShareSize())) + return NewRedundancyStrategy(es, int(scheme.GetRepairThreshold()), int(scheme.GetSuccessThreshold())) +} + +// NewRedundancyStrategyFromStorj creates new RedundancyStrategy from the given +// storj.RedundancyScheme. +func NewRedundancyStrategyFromStorj(scheme storj.RedundancyScheme) (RedundancyStrategy, error) { + fc, err := infectious.NewFEC(int(scheme.RequiredShares), int(scheme.TotalShares)) + if err != nil { + return RedundancyStrategy{}, Error.Wrap(err) + } + es := NewRSScheme(fc, int(scheme.ShareSize)) + return NewRedundancyStrategy(es, int(scheme.RepairShares), int(scheme.OptimalShares)) +} + +// RepairThreshold is the number of available erasure pieces below which +// the data must be repaired to avoid loss +func (rs *RedundancyStrategy) RepairThreshold() int { + return rs.repairThreshold +} + +// OptimalThreshold is the number of available erasure pieces above which +// there is no need for the data to be repaired +func (rs *RedundancyStrategy) OptimalThreshold() int { + return rs.optimalThreshold +} + +type encodedReader struct { + log *zap.Logger + ctx context.Context + rs RedundancyStrategy + pieces map[int]*encodedPiece +} + +// EncodeReader takes a Reader and a RedundancyStrategy and returns a slice of +// io.ReadClosers. 
+func EncodeReader(ctx context.Context, log *zap.Logger, r io.Reader, rs RedundancyStrategy) (_ []io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + + er := &encodedReader{ + log: log, + ctx: ctx, + rs: rs, + pieces: make(map[int]*encodedPiece, rs.TotalCount()), + } + + var pipeReaders []sync2.PipeReader + var pipeWriter sync2.PipeWriter + + tempDir, inmemory, _ := fpath.GetTempData(ctx) + if inmemory { + // TODO what default inmemory size will be enough + pipeReaders, pipeWriter, err = sync2.NewTeeInmemory(rs.TotalCount(), memory.MiB.Int64()) + } else { + if tempDir == "" { + tempDir = os.TempDir() + } + pipeReaders, pipeWriter, err = sync2.NewTeeFile(rs.TotalCount(), tempDir) + } + if err != nil { + return nil, err + } + + readers := make([]io.ReadCloser, 0, rs.TotalCount()) + for i := 0; i < rs.TotalCount(); i++ { + er.pieces[i] = &encodedPiece{ + er: er, + pipeReader: pipeReaders[i], + num: i, + stripeBuf: make([]byte, rs.StripeSize()), + shareBuf: make([]byte, rs.ErasureShareSize()), + } + readers = append(readers, er.pieces[i]) + } + + go er.fillBuffer(ctx, r, pipeWriter) + + return readers, nil +} + +func (er *encodedReader) fillBuffer(ctx context.Context, r io.Reader, w sync2.PipeWriter) { + var err error + defer mon.Task()(&ctx)(&err) + _, err = sync2.Copy(ctx, w, r) + err = w.CloseWithError(err) + if err != nil { + er.log.Error("Error closing buffer pipe", zap.Error(err)) + } +} + +type encodedPiece struct { + er *encodedReader + pipeReader sync2.PipeReader + num int + currentStripe int64 + stripeBuf []byte + shareBuf []byte + available int + err error +} + +func (ep *encodedPiece) Read(p []byte) (n int, err error) { + // No need to trace this function because it's very fast and called many times. 
+ if ep.err != nil { + return 0, ep.err + } + + if ep.available == 0 { + // take the next stripe from the segment buffer + _, err := io.ReadFull(ep.pipeReader, ep.stripeBuf) + if err != nil { + return 0, err + } + + // encode the num-th erasure share + err = ep.er.rs.EncodeSingle(ep.stripeBuf, ep.shareBuf, ep.num) + if err != nil { + return 0, err + } + + ep.currentStripe++ + ep.available = ep.er.rs.ErasureShareSize() + } + + // we have some buffer remaining for this piece. write it to the output + off := len(ep.shareBuf) - ep.available + n = copy(p, ep.shareBuf[off:]) + ep.available -= n + + return n, nil +} + +func (ep *encodedPiece) Close() (err error) { + ctx := ep.er.ctx + defer mon.Task()(&ctx)(&err) + return ep.pipeReader.Close() +} + +// EncodedRanger will take an existing Ranger and provide a means to get +// multiple Ranged sub-Readers. EncodedRanger does not match the normal Ranger +// interface. +type EncodedRanger struct { + log *zap.Logger + rr ranger.Ranger + rs RedundancyStrategy +} + +// NewEncodedRanger from the given Ranger and RedundancyStrategy. See the +// comments for EncodeReader about the repair and success thresholds. +func NewEncodedRanger(log *zap.Logger, rr ranger.Ranger, rs RedundancyStrategy) (*EncodedRanger, error) { + if rr.Size()%int64(rs.StripeSize()) != 0 { + return nil, Error.New("invalid erasure encoder and range reader combo. " + + "range reader size must be a multiple of erasure encoder block size") + } + return &EncodedRanger{ + log: log, + rs: rs, + rr: rr, + }, nil +} + +// OutputSize is like Ranger.Size but returns the Size of the erasure encoded +// pieces that come out. 
+func (er *EncodedRanger) OutputSize() int64 { + blocks := er.rr.Size() / int64(er.rs.StripeSize()) + return blocks * int64(er.rs.ErasureShareSize()) +} + +// Range is like Ranger.Range, but returns a slice of Readers +func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) (_ []io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + // the offset and length given may not be block-aligned, so let's figure + // out which blocks contain the request. + firstBlock, blockCount := encryption.CalcEncompassingBlocks( + offset, length, er.rs.ErasureShareSize()) + // okay, now let's encode the reader for the range containing the blocks + r, err := er.rr.Range(ctx, + firstBlock*int64(er.rs.StripeSize()), + blockCount*int64(er.rs.StripeSize())) + if err != nil { + return nil, err + } + readers, err := EncodeReader(ctx, er.log, r, er.rs) + if err != nil { + return nil, err + } + for i, r := range readers { + // the offset might start a few bytes in, so we potentially have to + // discard the beginning bytes + _, err := io.CopyN(ioutil.Discard, r, + offset-firstBlock*int64(er.rs.ErasureShareSize())) + if err != nil { + return nil, Error.Wrap(err) + } + // the length might be shorter than a multiple of the block size, so + // limit it + readers[i] = readcloser.LimitReadCloser(r, length) + } + return readers, nil +} + +// CalcPieceSize calculates what would be the piece size of the encoded data +// after erasure coding data with dataSize using the given ErasureScheme. 
+func CalcPieceSize(dataSize int64, scheme ErasureScheme) int64 { + const uint32Size = 4 + stripeSize := int64(scheme.StripeSize()) + stripes := (dataSize + uint32Size + stripeSize - 1) / stripeSize + + encodedSize := stripes * int64(scheme.StripeSize()) + pieceSize := encodedSize / int64(scheme.RequiredCount()) + + return pieceSize +} diff --git a/vendor/storj.io/uplink/private/eestream/piecebuf.go b/vendor/storj.io/uplink/private/eestream/piecebuf.go new file mode 100644 index 000000000..abce12157 --- /dev/null +++ b/vendor/storj.io/uplink/private/eestream/piecebuf.go @@ -0,0 +1,300 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "io" + "sync" + + "go.uber.org/zap" +) + +// PieceBuffer is a synchronized buffer for storing erasure shares for a piece. +type PieceBuffer struct { + log *zap.Logger + buf []byte + shareSize int + cond *sync.Cond + newDataCond *sync.Cond + rpos, wpos int + full bool + currentShare int64 // current erasure share number + totalwr int64 // total bytes ever written to the buffer + lastwr int64 // total bytes ever written when last notified newDataCond + err error +} + +// NewPieceBuffer creates and initializes a new PieceBuffer using buf as its +// internal content. If new data is written to the buffer, newDataCond will be +// notified. +func NewPieceBuffer(log *zap.Logger, buf []byte, shareSize int, newDataCond *sync.Cond) *PieceBuffer { + return &PieceBuffer{ + log: log, + buf: buf, + shareSize: shareSize, + cond: sync.NewCond(&sync.Mutex{}), + newDataCond: newDataCond, + } +} + +// Read reads the next len(p) bytes from the buffer or until the buffer is +// drained. The return value n is the number of bytes read. If the buffer has +// no data to return and no error is set, the call will block until new data is +// written to the buffer. Otherwise the error will be returned. 
+func (b *PieceBuffer) Read(p []byte) (n int, err error) { + defer b.cond.Broadcast() + b.cond.L.Lock() + defer b.cond.L.Unlock() + + for b.empty() { + if b.err != nil { + return 0, b.err + } + b.cond.Wait() + } + + if b.rpos >= b.wpos { + nn := copy(p, b.buf[b.rpos:]) + n += nn + b.rpos = (b.rpos + nn) % len(b.buf) + p = p[nn:] + } + + if b.rpos < b.wpos { + nn := copy(p, b.buf[b.rpos:b.wpos]) + n += nn + b.rpos += nn + } + + if n > 0 { + b.full = false + } + + return n, nil +} + +// Skip advances the read pointer with n bytes. It the buffered number of bytes +// are less than n, the method will block until enough data is written to the +// buffer. +func (b *PieceBuffer) Skip(n int) error { + defer b.cond.Broadcast() + b.cond.L.Lock() + defer b.cond.L.Unlock() + + for n > 0 { + for b.empty() { + if b.err != nil { + return b.err + } + b.cond.Wait() + } + + if b.rpos >= b.wpos { + if len(b.buf)-b.rpos > n { + b.rpos = (b.rpos + n) % len(b.buf) + n = 0 + } else { + n -= len(b.buf) - b.rpos + b.rpos = 0 + } + } else { + if b.wpos-b.rpos > n { + b.rpos += n + n = 0 + } else { + n -= b.wpos - b.rpos + b.rpos = b.wpos + } + } + + b.full = false + } + + return nil +} + +// Write writes the contents of p into the buffer. If the buffer is full it +// will block until some data is read from it, or an error is set. The return +// value n is the number of bytes written. If an error was set, it be returned. +func (b *PieceBuffer) Write(p []byte) (n int, err error) { + for n < len(p) { + nn, err := b.write(p[n:]) + n += nn + if err != nil { + return n, err + } + // Notify for new data only if a new complete erasure share is available + b.totalwr += int64(nn) + if b.totalwr/int64(b.shareSize)-b.lastwr/int64(b.shareSize) > 0 { + b.lastwr = b.totalwr + b.notifyNewData() + } + } + return n, nil +} + +// write is a helper method that takes care for the locking on each copy +// iteration. 
+func (b *PieceBuffer) write(p []byte) (n int, err error) { + defer b.cond.Broadcast() + b.cond.L.Lock() + defer b.cond.L.Unlock() + + for b.full { + if b.err != nil { + return n, b.err + } + b.cond.Wait() + } + + var wr int + if b.wpos < b.rpos { + wr = copy(b.buf[b.wpos:b.rpos], p) + } else { + wr = copy(b.buf[b.wpos:], p) + } + + n += wr + b.wpos = (b.wpos + wr) % len(b.buf) + if b.wpos == b.rpos { + b.full = true + } + + return n, nil +} + +// Close sets io.ErrClosedPipe to the buffer to prevent further writes and +// blocking on read. +func (b *PieceBuffer) Close() error { + b.SetError(io.ErrClosedPipe) + return nil +} + +// SetError sets an error to be returned by Read and Write. Read will return +// the error after all data is read from the buffer. +func (b *PieceBuffer) SetError(err error) { + b.setError(err) + b.notifyNewData() +} + +// setError is a helper method that locks the mutex before setting the error. +func (b *PieceBuffer) setError(err error) { + defer b.cond.Broadcast() + b.cond.L.Lock() + defer b.cond.L.Unlock() + + b.err = err +} + +// getError is a helper method that locks the mutex before getting the error. +func (b *PieceBuffer) getError() error { + b.cond.L.Lock() + defer b.cond.L.Unlock() + + return b.err +} + +// notifyNewData notifies newDataCond that new data is written to the buffer. +func (b *PieceBuffer) notifyNewData() { + b.newDataCond.L.Lock() + defer b.newDataCond.L.Unlock() + + b.newDataCond.Broadcast() +} + +// empty chacks if the buffer is empty. +func (b *PieceBuffer) empty() bool { + return !b.full && b.rpos == b.wpos +} + +// buffered returns the number of bytes that can be read from the buffer +// without blocking. 
+func (b *PieceBuffer) buffered() int { + b.cond.L.Lock() + defer b.cond.L.Unlock() + + switch { + case b.rpos < b.wpos: + return b.wpos - b.rpos + case b.rpos > b.wpos: + return len(b.buf) + b.wpos - b.rpos + case b.full: + return len(b.buf) + default: // empty + return 0 + } +} + +// HasShare checks if the num-th share can be read from the buffer without +// blocking. If there are older erasure shares in the buffer, they will be +// discarded to leave room for the newer erasure shares to be written. +func (b *PieceBuffer) HasShare(num int64) bool { + if num < b.currentShare { + // we should never get here! + b.log.Fatal("Requested erasure share was already read", + zap.Int64("Requested Erasure Share", num), + zap.Int64("Current Erasure Share", b.currentShare), + ) + } + + if b.getError() != nil { + return true + } + + bufShares := int64(b.buffered() / b.shareSize) + if num-b.currentShare > 0 { + if bufShares > num-b.currentShare { + // TODO: should this error be ignored? + _ = b.discardUntil(num) + } else { + _ = b.discardUntil(b.currentShare + bufShares) + } + bufShares = int64(b.buffered() / b.shareSize) + } + + return bufShares > num-b.currentShare +} + +// ReadShare reads the num-th erasure share from the buffer into p. Any shares +// before num will be discarded from the buffer. +func (b *PieceBuffer) ReadShare(num int64, p []byte) error { + if num < b.currentShare { + // we should never get here! + b.log.Fatal("Requested erasure share was already read", + zap.Int64("Requested Erasure Share", num), + zap.Int64("Current Erasure Share", b.currentShare), + ) + } + + err := b.discardUntil(num) + if err != nil { + return err + } + + _, err = io.ReadFull(b, p) + if err != nil { + return err + } + + b.currentShare++ + + return nil +} + +// discardUntil discards all erasure shares from the buffer until the num-th +// erasure share exclusively. 
+func (b *PieceBuffer) discardUntil(num int64) error { + if num <= b.currentShare { + return nil + } + + err := b.Skip(int(num-b.currentShare) * b.shareSize) + if err != nil { + return err + } + + b.currentShare = num + + return nil +} diff --git a/vendor/storj.io/uplink/private/eestream/rs.go b/vendor/storj.io/uplink/private/eestream/rs.go new file mode 100644 index 000000000..f2e650f97 --- /dev/null +++ b/vendor/storj.io/uplink/private/eestream/rs.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "github.com/vivint/infectious" +) + +type rsScheme struct { + fc *infectious.FEC + erasureShareSize int +} + +// NewRSScheme returns a Reed-Solomon-based ErasureScheme. +func NewRSScheme(fc *infectious.FEC, erasureShareSize int) ErasureScheme { + return &rsScheme{fc: fc, erasureShareSize: erasureShareSize} +} + +func (s *rsScheme) EncodeSingle(input, output []byte, num int) (err error) { + return s.fc.EncodeSingle(input, output, num) +} + +func (s *rsScheme) Encode(input []byte, output func(num int, data []byte)) ( + err error) { + return s.fc.Encode(input, func(s infectious.Share) { + output(s.Number, s.Data) + }) +} + +func (s *rsScheme) Decode(out []byte, in map[int][]byte) ([]byte, error) { + shares := make([]infectious.Share, 0, len(in)) + for num, data := range in { + shares = append(shares, infectious.Share{Number: num, Data: data}) + } + return s.fc.Decode(out, shares) +} + +func (s *rsScheme) ErasureShareSize() int { + return s.erasureShareSize +} + +func (s *rsScheme) StripeSize() int { + return s.erasureShareSize * s.fc.Required() +} + +func (s *rsScheme) TotalCount() int { + return s.fc.Total() +} + +func (s *rsScheme) RequiredCount() int { + return s.fc.Required() +} diff --git a/vendor/storj.io/uplink/private/eestream/stripe.go b/vendor/storj.io/uplink/private/eestream/stripe.go new file mode 100644 index 000000000..62b91bec4 --- /dev/null +++ 
b/vendor/storj.io/uplink/private/eestream/stripe.go @@ -0,0 +1,182 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package eestream + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + "sync" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/vivint/infectious" + "go.uber.org/zap" +) + +var ( + mon = monkit.Package() +) + +// StripeReader can read and decodes stripes from a set of readers +type StripeReader struct { + scheme ErasureScheme + cond *sync.Cond + readerCount int + bufs map[int]*PieceBuffer + inbufs map[int][]byte + inmap map[int][]byte + errmap map[int]error + forceErrorDetection bool +} + +// NewStripeReader creates a new StripeReader from the given readers, erasure +// scheme and max buffer memory. +func NewStripeReader(log *zap.Logger, rs map[int]io.ReadCloser, es ErasureScheme, mbm int, forceErrorDetection bool) *StripeReader { + readerCount := len(rs) + + r := &StripeReader{ + scheme: es, + cond: sync.NewCond(&sync.Mutex{}), + readerCount: readerCount, + bufs: make(map[int]*PieceBuffer, readerCount), + inbufs: make(map[int][]byte, readerCount), + inmap: make(map[int][]byte, readerCount), + errmap: make(map[int]error, readerCount), + forceErrorDetection: forceErrorDetection, + } + + bufSize := mbm / readerCount + bufSize -= bufSize % es.ErasureShareSize() + if bufSize < es.ErasureShareSize() { + bufSize = es.ErasureShareSize() + } + + for i := range rs { + r.inbufs[i] = make([]byte, es.ErasureShareSize()) + r.bufs[i] = NewPieceBuffer(log, make([]byte, bufSize), es.ErasureShareSize(), r.cond) + // Kick off a goroutine each reader to be copied into a PieceBuffer. + go func(r io.Reader, buf *PieceBuffer) { + _, err := io.Copy(buf, r) + if err != nil { + buf.SetError(err) + return + } + buf.SetError(io.EOF) + }(rs[i], r.bufs[i]) + } + + return r +} + +// Close closes the StripeReader and all PieceBuffers. 
+func (r *StripeReader) Close() error { + errs := make(chan error, len(r.bufs)) + for _, buf := range r.bufs { + go func(c io.Closer) { + errs <- c.Close() + }(buf) + } + var first error + for range r.bufs { + err := <-errs + if err != nil && first == nil { + first = Error.Wrap(err) + } + } + return first +} + +var backcompatMon = monkit.ScopeNamed("storj.io/storj/uplink/eestream") + +// ReadStripe reads and decodes the num-th stripe and concatenates it to p. The +// return value is the updated byte slice. +func (r *StripeReader) ReadStripe(ctx context.Context, num int64, p []byte) (_ []byte, err error) { + for i := range r.inmap { + delete(r.inmap, i) + } + + r.cond.L.Lock() + defer r.cond.L.Unlock() + + for r.pendingReaders() { + for r.readAvailableShares(ctx, num) == 0 { + r.cond.Wait() + } + if r.hasEnoughShares() { + out, err := r.scheme.Decode(p, r.inmap) + if err != nil { + if r.shouldWaitForMore(err) { + continue + } + return nil, err + } + return out, nil + } + } + // could not read enough shares to attempt a decode + backcompatMon.Meter("download_stripe_failed_not_enough_pieces_uplink").Mark(1) //locked + return nil, r.combineErrs(num) +} + +// readAvailableShares reads the available num-th erasure shares from the piece +// buffers without blocking. The return value n is the number of erasure shares +// read. +func (r *StripeReader) readAvailableShares(ctx context.Context, num int64) (n int) { + for i, buf := range r.bufs { + if r.inmap[i] != nil || r.errmap[i] != nil { + continue + } + if buf.HasShare(num) { + err := buf.ReadShare(num, r.inbufs[i]) + if err != nil { + r.errmap[i] = err + } else { + r.inmap[i] = r.inbufs[i] + } + n++ + } + } + return n +} + +// pendingReaders checks if there are any pending readers to get a share from. 
+func (r *StripeReader) pendingReaders() bool { + goodReaders := r.readerCount - len(r.errmap) + return goodReaders >= r.scheme.RequiredCount() && goodReaders > len(r.inmap) +} + +// hasEnoughShares check if there are enough erasure shares read to attempt +// a decode. +func (r *StripeReader) hasEnoughShares() bool { + return len(r.inmap) >= r.scheme.RequiredCount()+1 || + (!r.forceErrorDetection && len(r.inmap) == r.scheme.RequiredCount() && !r.pendingReaders()) +} + +// shouldWaitForMore checks the returned decode error if it makes sense to wait +// for more erasure shares to attempt an error correction. +func (r *StripeReader) shouldWaitForMore(err error) bool { + // check if the error is due to error detection + if !infectious.NotEnoughShares.Contains(err) && + !infectious.TooManyErrors.Contains(err) { + return false + } + // check if there are more input buffers to wait for + return r.pendingReaders() +} + +// combineErrs makes a useful error message from the errors in errmap. +// combineErrs always returns an error. +func (r *StripeReader) combineErrs(num int64) error { + if len(r.errmap) == 0 { + return Error.New("programmer error: no errors to combine") + } + errstrings := make([]string, 0, len(r.errmap)) + for i, err := range r.errmap { + errstrings = append(errstrings, fmt.Sprintf("\nerror retrieving piece %02d: %v", i, err)) + } + sort.Strings(errstrings) + return Error.New("failed to download stripe %d: %s", num, strings.Join(errstrings, "")) +} diff --git a/vendor/storj.io/uplink/private/eestream/unsafe_rs.go b/vendor/storj.io/uplink/private/eestream/unsafe_rs.go new file mode 100644 index 000000000..6e3611497 --- /dev/null +++ b/vendor/storj.io/uplink/private/eestream/unsafe_rs.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package eestream + +import ( + "github.com/vivint/infectious" +) + +type unsafeRSScheme struct { + fc *infectious.FEC + erasureShareSize int +} + +// NewUnsafeRSScheme returns a Reed-Solomon-based ErasureScheme without error correction. +func NewUnsafeRSScheme(fc *infectious.FEC, erasureShareSize int) ErasureScheme { + return &unsafeRSScheme{fc: fc, erasureShareSize: erasureShareSize} +} + +func (s *unsafeRSScheme) EncodeSingle(input, output []byte, num int) (err error) { + return s.fc.EncodeSingle(input, output, num) +} + +func (s *unsafeRSScheme) Encode(input []byte, output func(num int, data []byte)) ( + err error) { + return s.fc.Encode(input, func(s infectious.Share) { + output(s.Number, s.Data) + }) +} + +func (s *unsafeRSScheme) Decode(out []byte, in map[int][]byte) ([]byte, error) { + shares := make([]infectious.Share, 0, len(in)) + for num, data := range in { + shares = append(shares, infectious.Share{Number: num, Data: data}) + } + + stripe := make([]byte, s.RequiredCount()*s.ErasureShareSize()) + err := s.fc.Rebuild(shares, func(share infectious.Share) { + copy(stripe[share.Number*s.ErasureShareSize():], share.Data) + }) + if err != nil { + return nil, err + } + + return stripe, nil +} + +func (s *unsafeRSScheme) ErasureShareSize() int { + return s.erasureShareSize +} + +func (s *unsafeRSScheme) StripeSize() int { + return s.erasureShareSize * s.fc.Required() +} + +func (s *unsafeRSScheme) TotalCount() int { + return s.fc.Total() +} + +func (s *unsafeRSScheme) RequiredCount() int { + return s.fc.Required() +} diff --git a/vendor/storj.io/uplink/private/metainfo/batch.go b/vendor/storj.io/uplink/private/metainfo/batch.go new file mode 100644 index 000000000..9908a2694 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/batch.go @@ -0,0 +1,149 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package metainfo + +import ( + "github.com/zeebo/errs" + + "storj.io/common/pb" + "storj.io/uplink/private/eestream" +) + +var ( + // ErrInvalidType error for inalid response type casting + ErrInvalidType = errs.New("invalid response type") +) + +// BatchItem represents single request in batch +type BatchItem interface { + BatchItem() *pb.BatchRequestItem +} + +// BatchResponse single response from batch call +type BatchResponse struct { + pbRequest interface{} + pbResponse interface{} +} + +// CreateBucket returns BatchResponse for CreateBucket request +func (resp *BatchResponse) CreateBucket() (CreateBucketResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketCreate) + if !ok { + return CreateBucketResponse{}, ErrInvalidType + } + + createResponse, err := newCreateBucketResponse(item.BucketCreate) + if err != nil { + return CreateBucketResponse{}, err + } + return createResponse, nil +} + +// GetBucket returns response for GetBucket request +func (resp *BatchResponse) GetBucket() (GetBucketResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketGet) + if !ok { + return GetBucketResponse{}, ErrInvalidType + } + getResponse, err := newGetBucketResponse(item.BucketGet) + if err != nil { + return GetBucketResponse{}, err + } + return getResponse, nil +} + +// ListBuckets returns response for ListBuckets request +func (resp *BatchResponse) ListBuckets() (ListBucketsResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketList) + if !ok { + return ListBucketsResponse{}, ErrInvalidType + } + return newListBucketsResponse(item.BucketList), nil +} + +// BeginObject returns response for BeginObject request +func (resp *BatchResponse) BeginObject() (BeginObjectResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBegin) + if !ok { + return BeginObjectResponse{}, ErrInvalidType + } + + rs, err := eestream.NewRedundancyStrategyFromProto(item.ObjectBegin.RedundancyScheme) + if err != 
nil { + return BeginObjectResponse{}, Error.Wrap(err) + } + + return newBeginObjectResponse(item.ObjectBegin, rs), nil +} + +// BeginDeleteObject returns response for BeginDeleteObject request +func (resp *BatchResponse) BeginDeleteObject() (BeginDeleteObjectResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBeginDelete) + if !ok { + return BeginDeleteObjectResponse{}, ErrInvalidType + } + return newBeginDeleteObjectResponse(item.ObjectBeginDelete), nil +} + +// GetObject returns response for GetObject request +func (resp *BatchResponse) GetObject() (GetObjectResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectGet) + if !ok { + return GetObjectResponse{}, ErrInvalidType + } + return newGetObjectResponse(item.ObjectGet), nil +} + +// ListObjects returns response for ListObjects request +func (resp *BatchResponse) ListObjects() (ListObjectsResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectList) + if !ok { + return ListObjectsResponse{}, ErrInvalidType + } + + requestItem, ok := resp.pbRequest.(*pb.BatchRequestItem_ObjectList) + if !ok { + return ListObjectsResponse{}, ErrInvalidType + } + + return newListObjectsResponse(item.ObjectList, requestItem.ObjectList.EncryptedPrefix, requestItem.ObjectList.Recursive), nil +} + +// BeginSegment returns response for BeginSegment request +func (resp *BatchResponse) BeginSegment() (BeginSegmentResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBegin) + if !ok { + return BeginSegmentResponse{}, ErrInvalidType + } + + return newBeginSegmentResponse(item.SegmentBegin), nil +} + +// BeginDeleteSegment returns response for BeginDeleteSegment request +func (resp *BatchResponse) BeginDeleteSegment() (BeginDeleteSegmentResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBeginDelete) + if !ok { + return BeginDeleteSegmentResponse{}, ErrInvalidType + } + + return 
newBeginDeleteSegmentResponse(item.SegmentBeginDelete), nil +} + +// ListSegment returns response for ListSegment request +func (resp *BatchResponse) ListSegment() (ListSegmentsResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentList) + if !ok { + return ListSegmentsResponse{}, ErrInvalidType + } + return newListSegmentsResponse(item.SegmentList), nil +} + +// DownloadSegment returns response for DownloadSegment request +func (resp *BatchResponse) DownloadSegment() (DownloadSegmentResponse, error) { + item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentDownload) + if !ok { + return DownloadSegmentResponse{}, ErrInvalidType + } + return newDownloadSegmentResponse(item.SegmentDownload), nil +} diff --git a/vendor/storj.io/uplink/private/metainfo/client.go b/vendor/storj.io/uplink/private/metainfo/client.go new file mode 100644 index 000000000..3473885de --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/client.go @@ -0,0 +1,1199 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package metainfo + +import ( + "bytes" + "context" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/errs2" + "storj.io/common/macaroon" + "storj.io/common/pb" + "storj.io/common/rpc" + "storj.io/common/rpc/rpcstatus" + "storj.io/common/storj" + "storj.io/common/uuid" + "storj.io/uplink/private/eestream" +) + +var ( + mon = monkit.Package() + + // Error is the errs class of standard metainfo errors. + Error = errs.Class("metainfo error") +) + +// Client creates a grpcClient. +type Client struct { + conn *rpc.Conn + client pb.DRPCMetainfoClient + apiKeyRaw []byte + + userAgent string +} + +// ListItem is a single item in a listing. +type ListItem struct { + Path storj.Path + Pointer *pb.Pointer + IsPrefix bool +} + +// New used as a public function. 
+func New(client pb.DRPCMetainfoClient, apiKey *macaroon.APIKey, userAgent string) *Client { + return &Client{ + client: client, + apiKeyRaw: apiKey.SerializeRaw(), + + userAgent: userAgent, + } +} + +// Dial dials to metainfo endpoint with the specified api key. +func Dial(ctx context.Context, dialer rpc.Dialer, address string, apiKey *macaroon.APIKey, userAgent string) (*Client, error) { + conn, err := dialer.DialAddressInsecureBestEffort(ctx, address) + if err != nil { + return nil, Error.Wrap(err) + } + + return &Client{ + conn: conn, + client: pb.NewDRPCMetainfoClient(conn), + apiKeyRaw: apiKey.SerializeRaw(), + userAgent: userAgent, + }, nil +} + +// DialNodeURL dials to metainfo endpoint with the specified api key. +func DialNodeURL(ctx context.Context, dialer rpc.Dialer, nodeURL string, apiKey *macaroon.APIKey, userAgent string) (*Client, error) { + url, err := storj.ParseNodeURL(nodeURL) + if err != nil { + return nil, Error.Wrap(err) + } + + if url.ID.IsZero() { + return nil, Error.New("node ID is required in node URL %q", nodeURL) + } + + conn, err := dialer.DialAddressID(ctx, url.Address, url.ID) + if err != nil { + return nil, Error.Wrap(err) + } + + return &Client{ + conn: conn, + client: pb.NewDRPCMetainfoClient(conn), + apiKeyRaw: apiKey.SerializeRaw(), + userAgent: userAgent, + }, nil +} + +// Close closes the dialed connection. +func (client *Client) Close() error { + if client.conn != nil { + return Error.Wrap(client.conn.Close()) + } + return nil +} + +func (client *Client) header() *pb.RequestHeader { + return &pb.RequestHeader{ + ApiKey: client.apiKeyRaw, + UserAgent: []byte(client.userAgent), + } +} + +// GetProjectInfo gets the ProjectInfo for the api key associated with the metainfo client. 
+func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoResponse, err error) { + defer mon.Task()(&ctx)(&err) + + return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{ + Header: client.header(), + }) +} + +// CreateBucketParams parameters for CreateBucket method. +type CreateBucketParams struct { + Name []byte + PathCipher storj.CipherSuite + PartnerID []byte + DefaultSegmentsSize int64 + DefaultRedundancyScheme storj.RedundancyScheme + DefaultEncryptionParameters storj.EncryptionParameters +} + +func (params *CreateBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketCreateRequest { + defaultRS := params.DefaultRedundancyScheme + defaultEP := params.DefaultEncryptionParameters + + return &pb.BucketCreateRequest{ + Header: header, + Name: params.Name, + PathCipher: pb.CipherSuite(params.PathCipher), + PartnerId: params.PartnerID, + DefaultSegmentSize: params.DefaultSegmentsSize, + DefaultRedundancyScheme: &pb.RedundancyScheme{ + Type: pb.RedundancyScheme_SchemeType(defaultRS.Algorithm), + MinReq: int32(defaultRS.RequiredShares), + Total: int32(defaultRS.TotalShares), + RepairThreshold: int32(defaultRS.RepairShares), + SuccessThreshold: int32(defaultRS.OptimalShares), + ErasureShareSize: defaultRS.ShareSize, + }, + DefaultEncryptionParameters: &pb.EncryptionParameters{ + CipherSuite: pb.CipherSuite(defaultEP.CipherSuite), + BlockSize: int64(defaultEP.BlockSize), + }, + } +} + +// BatchItem returns single item for batch request. +func (params *CreateBucketParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_BucketCreate{ + BucketCreate: params.toRequest(nil), + }, + } +} + +// TODO potential names *Response/*Out/*Result + +// CreateBucketResponse response for CreateBucket request. 
+type CreateBucketResponse struct {
+	Bucket storj.Bucket
+}
+
+func newCreateBucketResponse(response *pb.BucketCreateResponse) (CreateBucketResponse, error) {
+	bucket, err := convertProtoToBucket(response.Bucket)
+	if err != nil {
+		return CreateBucketResponse{}, err
+	}
+	return CreateBucketResponse{
+		Bucket: bucket,
+	}, nil
+}
+
+// CreateBucket creates a new bucket.
+func (client *Client) CreateBucket(ctx context.Context, params CreateBucketParams) (respBucket storj.Bucket, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	response, err := client.client.CreateBucket(ctx, params.toRequest(client.header()))
+	if err != nil {
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+
+	respBucket, err = convertProtoToBucket(response.Bucket)
+	if err != nil {
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+	return respBucket, nil
+}
+
+// GetBucketParams parameters for GetBucket method.
+type GetBucketParams struct {
+	Name []byte
+}
+
+func (params *GetBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketGetRequest {
+	return &pb.BucketGetRequest{
+		Header: header,
+		Name:   params.Name,
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *GetBucketParams) BatchItem() *pb.BatchRequestItem {
+	return &pb.BatchRequestItem{
+		Request: &pb.BatchRequestItem_BucketGet{
+			BucketGet: params.toRequest(nil),
+		},
+	}
+}
+
+// GetBucketResponse response for GetBucket request.
+type GetBucketResponse struct {
+	Bucket storj.Bucket
+}
+
+func newGetBucketResponse(response *pb.BucketGetResponse) (GetBucketResponse, error) {
+	bucket, err := convertProtoToBucket(response.Bucket)
+	if err != nil {
+		return GetBucketResponse{}, err
+	}
+	return GetBucketResponse{
+		Bucket: bucket,
+	}, nil
+}
+
+// GetBucket returns a bucket.
+func (client *Client) GetBucket(ctx context.Context, params GetBucketParams) (respBucket storj.Bucket, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	resp, err := client.client.GetBucket(ctx, params.toRequest(client.header()))
+	if err != nil {
+		if errs2.IsRPC(err, rpcstatus.NotFound) {
+			return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
+		}
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+
+	respBucket, err = convertProtoToBucket(resp.Bucket)
+	if err != nil {
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+	return respBucket, nil
+}
+
+// DeleteBucketParams parameters for DeleteBucket method.
+type DeleteBucketParams struct {
+	Name []byte
+}
+
+func (params *DeleteBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketDeleteRequest {
+	return &pb.BucketDeleteRequest{
+		Header: header,
+		Name:   params.Name,
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *DeleteBucketParams) BatchItem() *pb.BatchRequestItem {
+	return &pb.BatchRequestItem{
+		Request: &pb.BatchRequestItem_BucketDelete{
+			BucketDelete: params.toRequest(nil),
+		},
+	}
+}
+
+// DeleteBucket deletes a bucket.
+func (client *Client) DeleteBucket(ctx context.Context, params DeleteBucketParams) (_ storj.Bucket, err error) {
+	defer mon.Task()(&ctx)(&err)
+	resp, err := client.client.DeleteBucket(ctx, params.toRequest(client.header()))
+	if err != nil {
+		if errs2.IsRPC(err, rpcstatus.NotFound) {
+			return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
+		}
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+
+	respBucket, err := convertProtoToBucket(resp.Bucket)
+	if err != nil {
+		return storj.Bucket{}, Error.Wrap(err)
+	}
+	return respBucket, nil
+}
+
+// ListBucketsParams parameters for ListBuckets method.
+type ListBucketsParams struct { + ListOpts storj.BucketListOptions +} + +func (params *ListBucketsParams) toRequest(header *pb.RequestHeader) *pb.BucketListRequest { + return &pb.BucketListRequest{ + Header: header, + Cursor: []byte(params.ListOpts.Cursor), + Limit: int32(params.ListOpts.Limit), + Direction: int32(params.ListOpts.Direction), + } +} + +// BatchItem returns single item for batch request. +func (params *ListBucketsParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_BucketList{ + BucketList: params.toRequest(nil), + }, + } +} + +// ListBucketsResponse response for ListBucket request. +type ListBucketsResponse struct { + BucketList storj.BucketList +} + +func newListBucketsResponse(response *pb.BucketListResponse) ListBucketsResponse { + bucketList := storj.BucketList{ + More: response.More, + } + bucketList.Items = make([]storj.Bucket, len(response.Items)) + for i, item := range response.GetItems() { + bucketList.Items[i] = storj.Bucket{ + Name: string(item.Name), + Created: item.CreatedAt, + } + } + return ListBucketsResponse{ + BucketList: bucketList, + } +} + +// ListBuckets lists buckets. 
+func (client *Client) ListBuckets(ctx context.Context, params ListBucketsParams) (_ storj.BucketList, err error) { + defer mon.Task()(&ctx)(&err) + + resp, err := client.client.ListBuckets(ctx, params.toRequest(client.header())) + if err != nil { + return storj.BucketList{}, Error.Wrap(err) + } + resultBucketList := storj.BucketList{ + More: resp.GetMore(), + } + resultBucketList.Items = make([]storj.Bucket, len(resp.GetItems())) + for i, item := range resp.GetItems() { + resultBucketList.Items[i] = storj.Bucket{ + Name: string(item.GetName()), + Created: item.GetCreatedAt(), + } + } + return resultBucketList, nil +} + +func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error) { + if pbBucket == nil { + return storj.Bucket{}, nil + } + + defaultRS := pbBucket.GetDefaultRedundancyScheme() + defaultEP := pbBucket.GetDefaultEncryptionParameters() + + var partnerID uuid.UUID + err = partnerID.UnmarshalJSON(pbBucket.GetPartnerId()) + if err != nil && !partnerID.IsZero() { + return bucket, errs.New("Invalid uuid") + } + + return storj.Bucket{ + Name: string(pbBucket.GetName()), + PartnerID: partnerID, + PathCipher: storj.CipherSuite(pbBucket.GetPathCipher()), + Created: pbBucket.GetCreatedAt(), + DefaultSegmentsSize: pbBucket.GetDefaultSegmentSize(), + DefaultRedundancyScheme: storj.RedundancyScheme{ + Algorithm: storj.RedundancyAlgorithm(defaultRS.GetType()), + ShareSize: defaultRS.GetErasureShareSize(), + RequiredShares: int16(defaultRS.GetMinReq()), + RepairShares: int16(defaultRS.GetRepairThreshold()), + OptimalShares: int16(defaultRS.GetSuccessThreshold()), + TotalShares: int16(defaultRS.GetTotal()), + }, + DefaultEncryptionParameters: storj.EncryptionParameters{ + CipherSuite: storj.CipherSuite(defaultEP.CipherSuite), + BlockSize: int32(defaultEP.BlockSize), + }, + }, nil +} + +// SetBucketAttributionParams parameters for SetBucketAttribution method. 
+type SetBucketAttributionParams struct {
+	Bucket    string
+	PartnerID uuid.UUID
+}
+
+func (params *SetBucketAttributionParams) toRequest(header *pb.RequestHeader) *pb.BucketSetAttributionRequest {
+	var bytes []byte
+	if !params.PartnerID.IsZero() {
+		bytes = params.PartnerID[:]
+	}
+
+	return &pb.BucketSetAttributionRequest{
+		Header:    header,
+		Name:      []byte(params.Bucket),
+		PartnerId: bytes,
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *SetBucketAttributionParams) BatchItem() *pb.BatchRequestItem {
+	return &pb.BatchRequestItem{
+		Request: &pb.BatchRequestItem_BucketSetAttribution{
+			BucketSetAttribution: params.toRequest(nil),
+		},
+	}
+}
+
+// SetBucketAttribution tries to set the attribution information on the bucket.
+func (client *Client) SetBucketAttribution(ctx context.Context, params SetBucketAttributionParams) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	_, err = client.client.SetBucketAttribution(ctx, params.toRequest(client.header()))
+
+	return Error.Wrap(err)
+}
+
+// BeginObjectParams parameters for BeginObject method.
+type BeginObjectParams struct {
+	Bucket               []byte
+	EncryptedPath        []byte
+	Version              int32
+	Redundancy           storj.RedundancyScheme
+	EncryptionParameters storj.EncryptionParameters
+	ExpiresAt            time.Time
+}
+
+func (params *BeginObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginRequest {
+	return &pb.ObjectBeginRequest{
+		Header:        header,
+		Bucket:        params.Bucket,
+		EncryptedPath: params.EncryptedPath,
+		Version:       params.Version,
+		ExpiresAt:     params.ExpiresAt,
+		RedundancyScheme: &pb.RedundancyScheme{
+			Type:             pb.RedundancyScheme_SchemeType(params.Redundancy.Algorithm),
+			ErasureShareSize: params.Redundancy.ShareSize,
+			MinReq:           int32(params.Redundancy.RequiredShares),
+			RepairThreshold:  int32(params.Redundancy.RepairShares),
+			SuccessThreshold: int32(params.Redundancy.OptimalShares),
+			Total:            int32(params.Redundancy.TotalShares),
+		},
+		EncryptionParameters: &pb.EncryptionParameters{
+			CipherSuite: pb.CipherSuite(params.EncryptionParameters.CipherSuite),
+			BlockSize:   int64(params.EncryptionParameters.BlockSize),
+		},
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *BeginObjectParams) BatchItem() *pb.BatchRequestItem {
+	return &pb.BatchRequestItem{
+		Request: &pb.BatchRequestItem_ObjectBegin{
+			ObjectBegin: params.toRequest(nil),
+		},
+	}
+}
+
+// BeginObjectResponse response for BeginObject request.
+type BeginObjectResponse struct {
+	StreamID             storj.StreamID
+	RedundancyStrategy   eestream.RedundancyStrategy
+	EncryptionParameters storj.EncryptionParameters
+}
+
+func newBeginObjectResponse(response *pb.ObjectBeginResponse, redundancyStrategy eestream.RedundancyStrategy) BeginObjectResponse {
+	ep := storj.EncryptionParameters{}
+	if response.EncryptionParameters != nil {
+		ep = storj.EncryptionParameters{
+			CipherSuite: storj.CipherSuite(response.EncryptionParameters.CipherSuite),
+			BlockSize:   int32(response.EncryptionParameters.BlockSize),
+		}
+	}
+
+	return BeginObjectResponse{
+		StreamID:             response.StreamId,
+		RedundancyStrategy:   redundancyStrategy,
+		EncryptionParameters: ep,
+	}
+}
+
+// BeginObject begins object creation.
+func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams) (_ BeginObjectResponse, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	response, err := client.client.BeginObject(ctx, params.toRequest(client.header()))
+	if err != nil {
+		return BeginObjectResponse{}, Error.Wrap(err)
+	}
+
+	rs := eestream.RedundancyStrategy{}
+	if response.RedundancyScheme != nil {
+		rs, err = eestream.NewRedundancyStrategyFromProto(response.RedundancyScheme)
+		if err != nil {
+			return BeginObjectResponse{}, Error.Wrap(err)
+		}
+	}
+
+	return newBeginObjectResponse(response, rs), nil
+}
+
+// CommitObjectParams parameters for CommitObject method.
+type CommitObjectParams struct {
+	StreamID storj.StreamID
+
+	EncryptedMetadataNonce storj.Nonce
+	EncryptedMetadata      []byte
+}
+
+func (params *CommitObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectCommitRequest {
+	return &pb.ObjectCommitRequest{
+		Header:                 header,
+		StreamId:               params.StreamID,
+		EncryptedMetadataNonce: params.EncryptedMetadataNonce,
+		EncryptedMetadata:      params.EncryptedMetadata,
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *CommitObjectParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_ObjectCommit{ + ObjectCommit: params.toRequest(nil), + }, + } +} + +// CommitObject commits a created object. +func (client *Client) CommitObject(ctx context.Context, params CommitObjectParams) (err error) { + defer mon.Task()(&ctx)(&err) + + _, err = client.client.CommitObject(ctx, params.toRequest(client.header())) + + return Error.Wrap(err) +} + +// GetObjectParams parameters for GetObject method. +type GetObjectParams struct { + Bucket []byte + EncryptedPath []byte + Version int32 +} + +func (params *GetObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectGetRequest { + return &pb.ObjectGetRequest{ + Header: header, + Bucket: params.Bucket, + EncryptedPath: params.EncryptedPath, + Version: params.Version, + } +} + +// BatchItem returns single item for batch request. +func (params *GetObjectParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_ObjectGet{ + ObjectGet: params.toRequest(nil), + }, + } +} + +// GetObjectResponse response for GetObject request. 
+type GetObjectResponse struct { + Info storj.ObjectInfo +} + +func newGetObjectResponse(response *pb.ObjectGetResponse) GetObjectResponse { + return GetObjectResponse{ + Info: newObjectInfo(response.Object), + } +} + +func newObjectInfo(object *pb.Object) storj.ObjectInfo { + if object == nil { + return storj.ObjectInfo{} + } + + info := storj.ObjectInfo{ + Bucket: string(object.Bucket), + Path: storj.Path(object.EncryptedPath), + + StreamID: object.StreamId, + + Created: object.CreatedAt, + Modified: object.CreatedAt, + Expires: object.ExpiresAt, + Metadata: object.EncryptedMetadata, + Stream: storj.Stream{ + Size: object.TotalSize, + EncryptionParameters: storj.EncryptionParameters{ + CipherSuite: storj.CipherSuite(object.EncryptionParameters.CipherSuite), + BlockSize: int32(object.EncryptionParameters.BlockSize), + }, + }, + } + + pbRS := object.RedundancyScheme + if pbRS != nil { + info.Stream.RedundancyScheme = storj.RedundancyScheme{ + Algorithm: storj.RedundancyAlgorithm(pbRS.Type), + ShareSize: pbRS.ErasureShareSize, + RequiredShares: int16(pbRS.MinReq), + RepairShares: int16(pbRS.RepairThreshold), + OptimalShares: int16(pbRS.SuccessThreshold), + TotalShares: int16(pbRS.Total), + } + } + return info +} + +// GetObject gets single object. +func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_ storj.ObjectInfo, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.GetObject(ctx, params.toRequest(client.header())) + + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return storj.ObjectInfo{}, storj.ErrObjectNotFound.Wrap(err) + } + return storj.ObjectInfo{}, Error.Wrap(err) + } + + getResponse := newGetObjectResponse(response) + return getResponse.Info, nil +} + +// BeginDeleteObjectParams parameters for BeginDeleteObject method. 
+type BeginDeleteObjectParams struct { + Bucket []byte + EncryptedPath []byte + Version int32 +} + +func (params *BeginDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginDeleteRequest { + return &pb.ObjectBeginDeleteRequest{ + Header: header, + Bucket: params.Bucket, + EncryptedPath: params.EncryptedPath, + Version: params.Version, + } +} + +// BatchItem returns single item for batch request. +func (params *BeginDeleteObjectParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_ObjectBeginDelete{ + ObjectBeginDelete: params.toRequest(nil), + }, + } +} + +// BeginDeleteObjectResponse response for BeginDeleteObject request. +type BeginDeleteObjectResponse struct { + StreamID storj.StreamID +} + +func newBeginDeleteObjectResponse(response *pb.ObjectBeginDeleteResponse) BeginDeleteObjectResponse { + return BeginDeleteObjectResponse{ + StreamID: response.StreamId, + } +} + +// BeginDeleteObject begins object deletion process. +func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteObjectParams) (_ storj.StreamID, _ storj.ObjectInfo, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.BeginDeleteObject(ctx, params.toRequest(client.header())) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return storj.StreamID{}, storj.ObjectInfo{}, storj.ErrObjectNotFound.Wrap(err) + } + return storj.StreamID{}, storj.ObjectInfo{}, Error.Wrap(err) + } + + return response.StreamId, newObjectInfo(response.Object), nil +} + +// FinishDeleteObjectParams parameters for FinishDeleteObject method. +type FinishDeleteObjectParams struct { + StreamID storj.StreamID +} + +func (params *FinishDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectFinishDeleteRequest { + return &pb.ObjectFinishDeleteRequest{ + Header: header, + StreamId: params.StreamID, + } +} + +// BatchItem returns single item for batch request. 
+func (params *FinishDeleteObjectParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_ObjectFinishDelete{ + ObjectFinishDelete: params.toRequest(nil), + }, + } +} + +// FinishDeleteObject finishes object deletion process. +func (client *Client) FinishDeleteObject(ctx context.Context, params FinishDeleteObjectParams) (err error) { + defer mon.Task()(&ctx)(&err) + + _, err = client.client.FinishDeleteObject(ctx, params.toRequest(client.header())) + + return Error.Wrap(err) +} + +// ListObjectsParams parameters for ListObjects method. +type ListObjectsParams struct { + Bucket []byte + EncryptedPrefix []byte + EncryptedCursor []byte + Limit int32 + IncludeMetadata bool + Recursive bool +} + +func (params *ListObjectsParams) toRequest(header *pb.RequestHeader) *pb.ObjectListRequest { + return &pb.ObjectListRequest{ + Header: header, + Bucket: params.Bucket, + EncryptedPrefix: params.EncryptedPrefix, + EncryptedCursor: params.EncryptedCursor, + Limit: params.Limit, + ObjectIncludes: &pb.ObjectListItemIncludes{ + Metadata: params.IncludeMetadata, + }, + Recursive: params.Recursive, + } +} + +// BatchItem returns single item for batch request. +func (params *ListObjectsParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_ObjectList{ + ObjectList: params.toRequest(nil), + }, + } +} + +// ListObjectsResponse response for ListObjects request. 
+type ListObjectsResponse struct { + Items []storj.ObjectListItem + More bool +} + +func newListObjectsResponse(response *pb.ObjectListResponse, encryptedPrefix []byte, recursive bool) ListObjectsResponse { + objects := make([]storj.ObjectListItem, len(response.Items)) + for i, object := range response.Items { + encryptedPath := object.EncryptedPath + isPrefix := false + if !recursive && len(encryptedPath) != 0 && encryptedPath[len(encryptedPath)-1] == '/' && !bytes.Equal(encryptedPath, encryptedPrefix) { + isPrefix = true + } + + objects[i] = storj.ObjectListItem{ + EncryptedPath: object.EncryptedPath, + Version: object.Version, + Status: int32(object.Status), + StatusAt: object.StatusAt, + CreatedAt: object.CreatedAt, + ExpiresAt: object.ExpiresAt, + EncryptedMetadataNonce: object.EncryptedMetadataNonce, + EncryptedMetadata: object.EncryptedMetadata, + + IsPrefix: isPrefix, + } + } + + return ListObjectsResponse{ + Items: objects, + More: response.More, + } +} + +// ListObjects lists objects according to specific parameters. +func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams) (_ []storj.ObjectListItem, more bool, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.ListObjects(ctx, params.toRequest(client.header())) + if err != nil { + return []storj.ObjectListItem{}, false, Error.Wrap(err) + } + + listResponse := newListObjectsResponse(response, params.EncryptedPrefix, params.Recursive) + return listResponse.Items, listResponse.More, Error.Wrap(err) +} + +// BeginSegmentParams parameters for BeginSegment method. 
+type BeginSegmentParams struct { + StreamID storj.StreamID + Position storj.SegmentPosition + MaxOrderLimit int64 +} + +func (params *BeginSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginRequest { + return &pb.SegmentBeginRequest{ + Header: header, + StreamId: params.StreamID, + Position: &pb.SegmentPosition{ + PartNumber: params.Position.PartNumber, + Index: params.Position.Index, + }, + MaxOrderLimit: params.MaxOrderLimit, + } +} + +// BatchItem returns single item for batch request. +func (params *BeginSegmentParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentBegin{ + SegmentBegin: params.toRequest(nil), + }, + } +} + +// BeginSegmentResponse response for BeginSegment request. +type BeginSegmentResponse struct { + SegmentID storj.SegmentID + Limits []*pb.AddressedOrderLimit + PiecePrivateKey storj.PiecePrivateKey +} + +func newBeginSegmentResponse(response *pb.SegmentBeginResponse) BeginSegmentResponse { + return BeginSegmentResponse{ + SegmentID: response.SegmentId, + Limits: response.AddressedLimits, + PiecePrivateKey: response.PrivateKey, + } +} + +// BeginSegment begins a segment upload. +func (client *Client) BeginSegment(ctx context.Context, params BeginSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.BeginSegment(ctx, params.toRequest(client.header())) + if err != nil { + return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err) + } + + return response.SegmentId, response.AddressedLimits, response.PrivateKey, nil +} + +// CommitSegmentParams parameters for CommitSegment method. 
+type CommitSegmentParams struct { + SegmentID storj.SegmentID + Encryption storj.SegmentEncryption + SizeEncryptedData int64 + + UploadResult []*pb.SegmentPieceUploadResult +} + +func (params *CommitSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentCommitRequest { + return &pb.SegmentCommitRequest{ + Header: header, + SegmentId: params.SegmentID, + + EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce, + EncryptedKey: params.Encryption.EncryptedKey, + SizeEncryptedData: params.SizeEncryptedData, + UploadResult: params.UploadResult, + } +} + +// BatchItem returns single item for batch request. +func (params *CommitSegmentParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentCommit{ + SegmentCommit: params.toRequest(nil), + }, + } +} + +// CommitSegment commits an uploaded segment. +func (client *Client) CommitSegment(ctx context.Context, params CommitSegmentParams) (err error) { + defer mon.Task()(&ctx)(&err) + + _, err = client.client.CommitSegment(ctx, params.toRequest(client.header())) + + return Error.Wrap(err) +} + +// MakeInlineSegmentParams parameters for MakeInlineSegment method. +type MakeInlineSegmentParams struct { + StreamID storj.StreamID + Position storj.SegmentPosition + Encryption storj.SegmentEncryption + EncryptedInlineData []byte +} + +func (params *MakeInlineSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentMakeInlineRequest { + return &pb.SegmentMakeInlineRequest{ + Header: header, + StreamId: params.StreamID, + Position: &pb.SegmentPosition{ + PartNumber: params.Position.PartNumber, + Index: params.Position.Index, + }, + EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce, + EncryptedKey: params.Encryption.EncryptedKey, + EncryptedInlineData: params.EncryptedInlineData, + } +} + +// BatchItem returns single item for batch request. 
+func (params *MakeInlineSegmentParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentMakeInline{ + SegmentMakeInline: params.toRequest(nil), + }, + } +} + +// MakeInlineSegment creates an inline segment. +func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSegmentParams) (err error) { + defer mon.Task()(&ctx)(&err) + + _, err = client.client.MakeInlineSegment(ctx, params.toRequest(client.header())) + + return Error.Wrap(err) +} + +// BeginDeleteSegmentParams parameters for BeginDeleteSegment method. +type BeginDeleteSegmentParams struct { + StreamID storj.StreamID + Position storj.SegmentPosition +} + +func (params *BeginDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginDeleteRequest { + return &pb.SegmentBeginDeleteRequest{ + Header: header, + StreamId: params.StreamID, + Position: &pb.SegmentPosition{ + PartNumber: params.Position.PartNumber, + Index: params.Position.Index, + }, + } +} + +// BatchItem returns single item for batch request. +func (params *BeginDeleteSegmentParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentBeginDelete{ + SegmentBeginDelete: params.toRequest(nil), + }, + } +} + +// BeginDeleteSegmentResponse response for BeginDeleteSegment request. +type BeginDeleteSegmentResponse struct { + SegmentID storj.SegmentID + Limits []*pb.AddressedOrderLimit + PiecePrivateKey storj.PiecePrivateKey +} + +func newBeginDeleteSegmentResponse(response *pb.SegmentBeginDeleteResponse) BeginDeleteSegmentResponse { + return BeginDeleteSegmentResponse{ + SegmentID: response.SegmentId, + Limits: response.AddressedLimits, + PiecePrivateKey: response.PrivateKey, + } +} + +// BeginDeleteSegment begins segment deletion process. 
+func (client *Client) BeginDeleteSegment(ctx context.Context, params BeginDeleteSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	response, err := client.client.BeginDeleteSegment(ctx, params.toRequest(client.header()))
+	if err != nil {
+		return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
+	}
+
+	return response.SegmentId, response.AddressedLimits, response.PrivateKey, nil
+}
+
+// FinishDeleteSegmentParams parameters for FinishDeleteSegment method.
+type FinishDeleteSegmentParams struct {
+	SegmentID storj.SegmentID
+
+	DeleteResults []*pb.SegmentPieceDeleteResult
+}
+
+func (params *FinishDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentFinishDeleteRequest {
+	return &pb.SegmentFinishDeleteRequest{
+		Header:    header,
+		SegmentId: params.SegmentID,
+		Results:   params.DeleteResults,
+	}
+}
+
+// BatchItem returns single item for batch request.
+func (params *FinishDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
+	return &pb.BatchRequestItem{
+		Request: &pb.BatchRequestItem_SegmentFinishDelete{
+			SegmentFinishDelete: params.toRequest(nil),
+		},
+	}
+}
+
+// FinishDeleteSegment finishes segment deletion process.
+func (client *Client) FinishDeleteSegment(ctx context.Context, params FinishDeleteSegmentParams) (err error) {
+	defer mon.Task()(&ctx)(&err)
+
+	_, err = client.client.FinishDeleteSegment(ctx, params.toRequest(client.header()))
+
+	return Error.Wrap(err)
+}
+
+// DownloadSegmentParams parameters for DownloadSegment method.
+type DownloadSegmentParams struct { + StreamID storj.StreamID + Position storj.SegmentPosition +} + +func (params *DownloadSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentDownloadRequest { + return &pb.SegmentDownloadRequest{ + Header: header, + StreamId: params.StreamID, + CursorPosition: &pb.SegmentPosition{ + PartNumber: params.Position.PartNumber, + Index: params.Position.Index, + }, + } +} + +// BatchItem returns single item for batch request. +func (params *DownloadSegmentParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentDownload{ + SegmentDownload: params.toRequest(nil), + }, + } +} + +// DownloadSegmentResponse response for DownloadSegment request. +type DownloadSegmentResponse struct { + Info storj.SegmentDownloadInfo + + Limits []*pb.AddressedOrderLimit +} + +func newDownloadSegmentResponse(response *pb.SegmentDownloadResponse) DownloadSegmentResponse { + info := storj.SegmentDownloadInfo{ + SegmentID: response.SegmentId, + Size: response.SegmentSize, + EncryptedInlineData: response.EncryptedInlineData, + PiecePrivateKey: response.PrivateKey, + SegmentEncryption: storj.SegmentEncryption{ + EncryptedKeyNonce: response.EncryptedKeyNonce, + EncryptedKey: response.EncryptedKey, + }, + } + if response.Next != nil { + info.Next = storj.SegmentPosition{ + PartNumber: response.Next.PartNumber, + Index: response.Next.Index, + } + } + + for i := range response.AddressedLimits { + if response.AddressedLimits[i].Limit == nil { + response.AddressedLimits[i] = nil + } + } + return DownloadSegmentResponse{ + Info: info, + Limits: response.AddressedLimits, + } +} + +// DownloadSegment gets information for downloading remote segment or data +// from an inline segment. 
+func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmentParams) (_ storj.SegmentDownloadInfo, _ []*pb.AddressedOrderLimit, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.DownloadSegment(ctx, params.toRequest(client.header())) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return storj.SegmentDownloadInfo{}, nil, storj.ErrObjectNotFound.Wrap(err) + } + return storj.SegmentDownloadInfo{}, nil, Error.Wrap(err) + } + + downloadResponse := newDownloadSegmentResponse(response) + return downloadResponse.Info, downloadResponse.Limits, nil +} + +// ListSegmentsParams parameters for ListSegment method. +type ListSegmentsParams struct { + StreamID storj.StreamID + CursorPosition storj.SegmentPosition + Limit int32 +} + +// ListSegmentsResponse response for ListSegments request. +type ListSegmentsResponse struct { + Items []storj.SegmentListItem + More bool +} + +func (params *ListSegmentsParams) toRequest(header *pb.RequestHeader) *pb.SegmentListRequest { + return &pb.SegmentListRequest{ + Header: header, + StreamId: params.StreamID, + CursorPosition: &pb.SegmentPosition{ + PartNumber: params.CursorPosition.PartNumber, + Index: params.CursorPosition.Index, + }, + Limit: params.Limit, + } +} + +// BatchItem returns single item for batch request. 
+func (params *ListSegmentsParams) BatchItem() *pb.BatchRequestItem { + return &pb.BatchRequestItem{ + Request: &pb.BatchRequestItem_SegmentList{ + SegmentList: params.toRequest(nil), + }, + } +} + +func newListSegmentsResponse(response *pb.SegmentListResponse) ListSegmentsResponse { + items := make([]storj.SegmentListItem, len(response.Items)) + for i, responseItem := range response.Items { + items[i] = storj.SegmentListItem{ + Position: storj.SegmentPosition{ + PartNumber: responseItem.Position.PartNumber, + Index: responseItem.Position.Index, + }, + } + } + return ListSegmentsResponse{ + Items: items, + More: response.More, + } +} + +// ListSegments lists object segments. +func (client *Client) ListSegments(ctx context.Context, params ListSegmentsParams) (_ []storj.SegmentListItem, more bool, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.ListSegments(ctx, params.toRequest(client.header())) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return []storj.SegmentListItem{}, false, storj.ErrObjectNotFound.Wrap(err) + } + return []storj.SegmentListItem{}, false, Error.Wrap(err) + } + + listResponse := newListSegmentsResponse(response) + return listResponse.Items, listResponse.More, Error.Wrap(err) +} + +// Batch sends multiple requests in one batch. 
+func (client *Client) Batch(ctx context.Context, requests ...BatchItem) (resp []BatchResponse, err error) { + defer mon.Task()(&ctx)(&err) + + batchItems := make([]*pb.BatchRequestItem, len(requests)) + for i, request := range requests { + batchItems[i] = request.BatchItem() + } + response, err := client.client.Batch(ctx, &pb.BatchRequest{ + Header: client.header(), + Requests: batchItems, + }) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return []BatchResponse{}, storj.ErrObjectNotFound.Wrap(err) + } + + return []BatchResponse{}, Error.Wrap(err) + } + + resp = make([]BatchResponse, len(response.Responses)) + for i, response := range response.Responses { + resp[i] = BatchResponse{ + pbRequest: batchItems[i].Request, + pbResponse: response.Response, + } + } + + return resp, nil +} + +// SetRawAPIKey sets the client's raw API key. Mainly used for testing. +func (client *Client) SetRawAPIKey(key []byte) { + client.apiKeyRaw = key +} diff --git a/vendor/storj.io/uplink/private/metainfo/client_old.go b/vendor/storj.io/uplink/private/metainfo/client_old.go new file mode 100644 index 000000000..ed8cf6c58 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/client_old.go @@ -0,0 +1,175 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package metainfo + +import ( + "context" + "time" + + "storj.io/common/errs2" + "storj.io/common/pb" + "storj.io/common/rpc/rpcstatus" + "storj.io/common/storj" + "storj.io/common/uuid" +) + +// CreateSegmentOld requests the order limits for creating a new segment +func (client *Client) CreateSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.CreateSegmentOld(ctx, &pb.SegmentWriteRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Path: []byte(path), + Segment: segmentIndex, + Redundancy: redundancy, + MaxEncryptedSegmentSize: maxEncryptedSegmentSize, + Expiration: expiration, + }) + if err != nil { + return nil, rootPieceID, piecePrivateKey, Error.Wrap(err) + } + + return response.GetAddressedLimits(), response.RootPieceId, response.PrivateKey, nil +} + +// CommitSegmentOld requests to store the pointer for the segment +func (client *Client) CommitSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, pointer *pb.Pointer, originalLimits []*pb.OrderLimit) (savedPointer *pb.Pointer, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.CommitSegmentOld(ctx, &pb.SegmentCommitRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Path: []byte(path), + Segment: segmentIndex, + Pointer: pointer, + OriginalLimits: originalLimits, + }) + if err != nil { + return nil, Error.Wrap(err) + } + + return response.GetPointer(), nil +} + +// SegmentInfoOld requests the pointer of a segment +func (client *Client) SegmentInfoOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := 
client.client.SegmentInfoOld(ctx, &pb.SegmentInfoRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Path: []byte(path), + Segment: segmentIndex, + }) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return nil, storj.ErrObjectNotFound.Wrap(err) + } + return nil, Error.Wrap(err) + } + + return response.GetPointer(), nil +} + +// ReadSegmentOld requests the order limits for reading a segment +func (client *Client) ReadSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.DownloadSegmentOld(ctx, &pb.SegmentDownloadRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Path: []byte(path), + Segment: segmentIndex, + }) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return nil, nil, piecePrivateKey, storj.ErrObjectNotFound.Wrap(err) + } + return nil, nil, piecePrivateKey, Error.Wrap(err) + } + + return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), response.PrivateKey, nil +} + +// sortLimits sorts order limits and fill missing ones with nil values +func sortLimits(limits []*pb.AddressedOrderLimit, pointer *pb.Pointer) []*pb.AddressedOrderLimit { + sorted := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal()) + for _, piece := range pointer.GetRemote().GetRemotePieces() { + sorted[piece.GetPieceNum()] = getLimitByStorageNodeID(limits, piece.NodeId) + } + return sorted +} + +func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID storj.NodeID) *pb.AddressedOrderLimit { + for _, limit := range limits { + if limit.GetLimit().StorageNodeId == storageNodeID { + return limit + } + } + return nil +} + +// DeleteSegmentOld requests the order limits for deleting a segment +func (client *Client) DeleteSegmentOld(ctx context.Context, 
bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.DeleteSegmentOld(ctx, &pb.SegmentDeleteRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Path: []byte(path), + Segment: segmentIndex, + }) + if err != nil { + if errs2.IsRPC(err, rpcstatus.NotFound) { + return nil, piecePrivateKey, storj.ErrObjectNotFound.Wrap(err) + } + return nil, piecePrivateKey, Error.Wrap(err) + } + + return response.GetAddressedLimits(), response.PrivateKey, nil +} + +// ListSegmentsOld lists the available segments +func (client *Client) ListSegmentsOld(ctx context.Context, bucket string, prefix, startAfter, ignoredEndBefore storj.Path, recursive bool, limit int32, metaFlags uint32) (items []ListItem, more bool, err error) { + defer mon.Task()(&ctx)(&err) + + response, err := client.client.ListSegmentsOld(ctx, &pb.ListSegmentsRequestOld{ + Header: client.header(), + Bucket: []byte(bucket), + Prefix: []byte(prefix), + StartAfter: []byte(startAfter), + Recursive: recursive, + Limit: limit, + MetaFlags: metaFlags, + }) + if err != nil { + return nil, false, Error.Wrap(err) + } + + list := response.GetItems() + items = make([]ListItem, len(list)) + for i, item := range list { + items[i] = ListItem{ + Path: storj.Path(item.GetPath()), + Pointer: item.GetPointer(), + IsPrefix: item.IsPrefix, + } + } + + return items, response.GetMore(), nil +} + +// SetAttributionOld tries to set the attribution information on the bucket. 
+func (client *Client) SetAttributionOld(ctx context.Context, bucket string, partnerID uuid.UUID) (err error) { + defer mon.Task()(&ctx)(&err) + + _, err = client.client.SetAttributionOld(ctx, &pb.SetAttributionRequestOld{ + Header: client.header(), + PartnerId: partnerID[:], // TODO: implement storj.UUID that can be sent using pb + BucketName: []byte(bucket), + }) + + return Error.Wrap(err) +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go new file mode 100644 index 000000000..8b9d66072 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/buckets.go @@ -0,0 +1,128 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +import ( + "context" + + "github.com/zeebo/errs" + + "storj.io/common/encryption" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" +) + +// CreateBucket creates a new bucket +func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (_ storj.Bucket, err error) { + defer mon.Task()(&ctx)(&err) + + if bucketName == "" { + return storj.Bucket{}, storj.ErrNoBucket.New("") + } + + if info == nil { + info = &storj.Bucket{PathCipher: storj.EncAESGCM} + } else { + // ensure we don't modify the input argument + clone := *info + info = &clone + } + + if info.DefaultSegmentsSize == 0 { + info.DefaultSegmentsSize = db.segmentsSize + } + + if !info.DefaultRedundancyScheme.IsZero() { + if err := validateBlockSize(info.DefaultRedundancyScheme, info.DefaultEncryptionParameters.BlockSize); err != nil { + return storj.Bucket{}, storj.ErrBucket.Wrap(err) + } + } + + if info.PathCipher < storj.EncNull || info.PathCipher > storj.EncSecretBox { + return storj.Bucket{}, encryption.ErrInvalidConfig.New("encryption type %d is not supported", info.PathCipher) + } + + info.Name = bucketName + + // uuid MarshalJSON implementation always returns err == nil + partnerID, _ 
:= info.PartnerID.MarshalJSON() + newBucket, err := db.metainfo.CreateBucket(ctx, metainfo.CreateBucketParams{ + Name: []byte(info.Name), + PathCipher: info.PathCipher, + PartnerID: partnerID, + DefaultSegmentsSize: info.DefaultSegmentsSize, + DefaultRedundancyScheme: info.DefaultRedundancyScheme, + DefaultEncryptionParameters: info.DefaultEncryptionParameters, + }) + if err != nil { + return storj.Bucket{}, storj.ErrBucket.Wrap(err) + } + + return newBucket, nil +} + +// validateBlockSize confirms the encryption block size aligns with stripe size. +// Stripes contain encrypted data therefore we want the stripe boundaries to match +// with the encryption block size boundaries. We also want stripes to be small for +// audits, but encryption can be a bit larger. All told, block size should be an integer +// multiple of stripe size. +func validateBlockSize(redundancyScheme storj.RedundancyScheme, blockSize int32) error { + stripeSize := redundancyScheme.StripeSize() + + if stripeSize == 0 || blockSize%stripeSize != 0 { + return errs.New("encryption BlockSize (%d) must be a multiple of RS ShareSize (%d) * RS RequiredShares (%d)", + blockSize, redundancyScheme.ShareSize, redundancyScheme.RequiredShares, + ) + } + return nil +} + +// DeleteBucket deletes bucket +func (db *Project) DeleteBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { + defer mon.Task()(&ctx)(&err) + + if bucketName == "" { + return storj.Bucket{}, storj.ErrNoBucket.New("") + } + bucket, err := db.metainfo.DeleteBucket(ctx, metainfo.DeleteBucketParams{ + Name: []byte(bucketName), + }) + if err != nil { + return storj.Bucket{}, storj.ErrBucket.Wrap(err) + } + + return bucket, nil +} + +// GetBucket gets bucket information +func (db *Project) GetBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { + defer mon.Task()(&ctx)(&err) + + if bucketName == "" { + return storj.Bucket{}, storj.ErrNoBucket.New("") + } + + bucket, err := db.metainfo.GetBucket(ctx, 
metainfo.GetBucketParams{ + Name: []byte(bucketName), + }) + if err != nil { + return storj.Bucket{}, storj.ErrBucket.Wrap(err) + } + + return bucket, nil +} + +// ListBuckets lists buckets +func (db *Project) ListBuckets(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) { + defer mon.Task()(&ctx)(&err) + + bucketList, err := db.metainfo.ListBuckets(ctx, metainfo.ListBucketsParams{ + ListOpts: listOpts, + }) + if err != nil { + return storj.BucketList{}, storj.ErrBucket.Wrap(err) + } + + return bucketList, nil +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go new file mode 100644 index 000000000..6a6383aa6 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/interface.go @@ -0,0 +1,83 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +import ( + "context" + "time" + + "storj.io/common/storj" +) + +// CreateObject has optional parameters that can be set +type CreateObject struct { + Metadata map[string]string + ContentType string + Expires time.Time + + storj.RedundancyScheme + storj.EncryptionParameters +} + +// Object converts the CreateObject to an object with unitialized values +func (create CreateObject) Object(bucket storj.Bucket, path storj.Path) storj.Object { + return storj.Object{ + Bucket: bucket, + Path: path, + Metadata: create.Metadata, + ContentType: create.ContentType, + Expires: create.Expires, + Stream: storj.Stream{ + Size: -1, // unknown + Checksum: nil, // unknown + SegmentCount: -1, // unknown + FixedSegmentSize: -1, // unknown + + RedundancyScheme: create.RedundancyScheme, + EncryptionParameters: create.EncryptionParameters, + }, + } +} + +// ReadOnlyStream is an interface for reading segment information +type ReadOnlyStream interface { + Info() storj.Object + + // SegmentsAt returns the segment that contains the byteOffset and following 
segments. + // Limit specifies how much to return at most. + SegmentsAt(ctx context.Context, byteOffset int64, limit int64) (infos []storj.Segment, more bool, err error) + // Segments returns the segment at index. + // Limit specifies how much to return at most. + Segments(ctx context.Context, index int64, limit int64) (infos []storj.Segment, more bool, err error) +} + +// MutableObject is an interface for manipulating creating/deleting object stream +type MutableObject interface { + // Info gets the current information about the object + Info() storj.Object + + // CreateStream creates a new stream for the object + CreateStream(ctx context.Context) (MutableStream, error) + // ContinueStream starts to continue a partially uploaded stream. + ContinueStream(ctx context.Context) (MutableStream, error) + // DeleteStream deletes any information about this objects stream + DeleteStream(ctx context.Context) error + + // Commit commits the changes to the database + Commit(ctx context.Context) error +} + +// MutableStream is an interface for manipulating stream information +type MutableStream interface { + BucketName() string + Path() string + + Expires() time.Time + Metadata() ([]byte, error) + + // AddSegments adds segments to the stream. + AddSegments(ctx context.Context, segments ...storj.Segment) error + // UpdateSegments updates information about segments. + UpdateSegments(ctx context.Context, segments ...storj.Segment) error +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go new file mode 100644 index 000000000..837c6daf4 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/metainfo.go @@ -0,0 +1,66 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package kvmetainfo + +import ( + "context" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/encryption" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/segments" + "storj.io/uplink/private/storage/streams" +) + +var mon = monkit.Package() + +var errClass = errs.Class("kvmetainfo") + +const defaultSegmentLimit = 8 // TODO + +// DB implements metainfo database +type DB struct { + project *Project + + metainfo *metainfo.Client + + streams streams.Store + segments segments.Store + + encStore *encryption.Store +} + +// New creates a new metainfo database +func New(project *Project, metainfo *metainfo.Client, streams streams.Store, segments segments.Store, encStore *encryption.Store) *DB { + return &DB{ + project: project, + metainfo: metainfo, + streams: streams, + segments: segments, + encStore: encStore, + } +} + +// CreateBucket creates a new bucket with the specified information +func (db *DB) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (bucketInfo storj.Bucket, err error) { + return db.project.CreateBucket(ctx, bucketName, info) +} + +// DeleteBucket deletes bucket +func (db *DB) DeleteBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { + return db.project.DeleteBucket(ctx, bucketName) +} + +// GetBucket gets bucket information +func (db *DB) GetBucket(ctx context.Context, bucketName string) (bucketInfo storj.Bucket, err error) { + return db.project.GetBucket(ctx, bucketName) +} + +// ListBuckets lists buckets +func (db *DB) ListBuckets(ctx context.Context, options storj.BucketListOptions) (list storj.BucketList, err error) { + return db.project.ListBuckets(ctx, options) +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go new file mode 100644 index 000000000..1b8e7e5eb --- /dev/null +++ 
b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/objects.go @@ -0,0 +1,628 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +import ( + "context" + "errors" + "strings" + + "storj.io/common/encryption" + "storj.io/common/memory" + "storj.io/common/paths" + "storj.io/common/pb" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/segments" + "storj.io/uplink/private/storage/streams" +) + +// DefaultRS default values for RedundancyScheme +var DefaultRS = storj.RedundancyScheme{ + Algorithm: storj.ReedSolomon, + RequiredShares: 20, + RepairShares: 30, + OptimalShares: 40, + TotalShares: 50, + ShareSize: 1 * memory.KiB.Int32(), +} + +// DefaultES default values for EncryptionParameters +// BlockSize should default to the size of a stripe +var DefaultES = storj.EncryptionParameters{ + CipherSuite: storj.EncAESGCM, + BlockSize: DefaultRS.StripeSize(), +} + +var contentTypeKey = "content-type" + +// GetObject returns information about an object +func (db *DB) GetObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (info storj.Object, err error) { + defer mon.Task()(&ctx)(&err) + + _, info, err = db.getInfo(ctx, bucket, path) + + return info, err +} + +// GetObjectStream returns interface for reading the object stream +func (db *DB) GetObjectStream(ctx context.Context, bucket storj.Bucket, object storj.Object) (stream ReadOnlyStream, err error) { + defer mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return nil, storj.ErrNoBucket.New("") + } + + if object.Path == "" { + return nil, storj.ErrNoPath.New("") + } + + return &readonlyStream{ + db: db, + info: object, + }, nil +} + +// CreateObject creates an uploading object and returns an interface for uploading Object information +func (db *DB) CreateObject(ctx context.Context, bucket storj.Bucket, path storj.Path, createInfo *CreateObject) (object MutableObject, err error) { + defer 
mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return nil, storj.ErrNoBucket.New("") + } + + if path == "" { + return nil, storj.ErrNoPath.New("") + } + + info := storj.Object{ + Bucket: bucket, + Path: path, + } + + if createInfo != nil { + info.Metadata = createInfo.Metadata + info.ContentType = createInfo.ContentType + info.Expires = createInfo.Expires + info.RedundancyScheme = createInfo.RedundancyScheme + info.EncryptionParameters = createInfo.EncryptionParameters + } + + // TODO: autodetect content type from the path extension + // if info.ContentType == "" {} + + if info.EncryptionParameters.IsZero() { + info.EncryptionParameters = storj.EncryptionParameters{ + CipherSuite: DefaultES.CipherSuite, + BlockSize: DefaultES.BlockSize, + } + } + + if info.RedundancyScheme.IsZero() { + info.RedundancyScheme = DefaultRS + + // If the provided EncryptionParameters.BlockSize isn't a multiple of the + // DefaultRS stripeSize, then overwrite the EncryptionParameters with the DefaultES values + if err := validateBlockSize(DefaultRS, info.EncryptionParameters.BlockSize); err != nil { + info.EncryptionParameters.BlockSize = DefaultES.BlockSize + } + } + + return &mutableObject{ + db: db, + info: info, + }, nil +} + +// ModifyObject modifies a committed object +func (db *DB) ModifyObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) { + defer mon.Task()(&ctx)(&err) + return nil, errors.New("not implemented") +} + +// DeleteObject deletes an object from database +func (db *DB) DeleteObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (_ storj.Object, err error) { + defer mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return storj.Object{}, storj.ErrNoBucket.New("") + } + + if len(path) == 0 { + return storj.Object{}, storj.ErrNoPath.New("") + } + + info, err := db.streams.Delete(ctx, storj.JoinPaths(bucket.Name, path)) + if err != nil { + return storj.Object{}, err + } + + encPath, err := 
encryption.EncryptPathWithStoreCipher(bucket.Name, paths.NewUnencrypted(path), db.encStore) + if err != nil { + return storj.Object{}, err + } + + _, obj, err := objectFromInfo(ctx, bucket, path, encPath, info, db.encStore) + return obj, err +} + +// ModifyPendingObject creates an interface for updating a partially uploaded object +func (db *DB) ModifyPendingObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) { + defer mon.Task()(&ctx)(&err) + return nil, errors.New("not implemented") +} + +// ListPendingObjects lists pending objects in bucket based on the ListOptions +func (db *DB) ListPendingObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { + defer mon.Task()(&ctx)(&err) + return storj.ObjectList{}, errors.New("not implemented") +} + +// ListObjects lists objects in bucket based on the ListOptions +func (db *DB) ListObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { + defer mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return storj.ObjectList{}, storj.ErrNoBucket.New("") + } + + var startAfter string + switch options.Direction { + // TODO for now we are supporting only storj.After + // case storj.Forward: + // // forward lists forwards from cursor, including cursor + // startAfter = keyBefore(options.Cursor) + case storj.After: + // after lists forwards from cursor, without cursor + startAfter = options.Cursor + default: + return storj.ObjectList{}, errClass.New("invalid direction %d", options.Direction) + } + + // TODO: we should let libuplink users be able to determine what metadata fields they request as well + // metaFlags := meta.All + // if db.pathCipher(bucket) == storj.EncNull || db.pathCipher(bucket) == storj.EncNullBase64URL { + // metaFlags = meta.None + // } + + // TODO use flags with listing + // if metaFlags&meta.Size != 0 { + // Calculating the stream's size require also 
the user-defined metadata, + // where stream store keeps info about the number of segments and their size. + // metaFlags |= meta.UserDefined + // } + + prefix := streams.ParsePath(storj.JoinPaths(bucket.Name, options.Prefix)) + prefixKey, err := encryption.DerivePathKey(prefix.Bucket(), streams.PathForKey(prefix.UnencryptedPath().Raw()), db.encStore) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + encPrefix, err := encryption.EncryptPathWithStoreCipher(prefix.Bucket(), prefix.UnencryptedPath(), db.encStore) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + // If the raw unencrypted path ends in a `/` we need to remove the final + // section of the encrypted path. For example, if we are listing the path + // `/bob/`, the encrypted path results in `enc("")/enc("bob")/enc("")`. This + // is an incorrect list prefix, what we really want is `enc("")/enc("bob")` + if strings.HasSuffix(prefix.UnencryptedPath().Raw(), "/") { + lastSlashIdx := strings.LastIndex(encPrefix.Raw(), "/") + encPrefix = paths.NewEncrypted(encPrefix.Raw()[:lastSlashIdx]) + } + + // We have to encrypt startAfter but only if it doesn't contain a bucket. + // It contains a bucket if and only if the prefix has no bucket. This is why it is a raw + // string instead of a typed string: it's either a bucket or an unencrypted path component + // and that isn't known at compile time. 
+ needsEncryption := prefix.Bucket() != "" + var base *encryption.Base + if needsEncryption { + _, _, base = db.encStore.LookupEncrypted(prefix.Bucket(), encPrefix) + + startAfter, err = encryption.EncryptPathRaw(startAfter, db.pathCipher(base.PathCipher), prefixKey) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + } + + items, more, err := db.metainfo.ListObjects(ctx, metainfo.ListObjectsParams{ + Bucket: []byte(bucket.Name), + EncryptedPrefix: []byte(encPrefix.Raw()), + EncryptedCursor: []byte(startAfter), + Limit: int32(options.Limit), + Recursive: options.Recursive, + }) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + list = storj.ObjectList{ + Bucket: bucket.Name, + Prefix: options.Prefix, + More: more, + Items: make([]storj.Object, len(items)), + } + + for i, item := range items { + var path streams.Path + var itemPath string + + if needsEncryption { + itemPath, err = encryption.DecryptPathRaw(string(item.EncryptedPath), db.pathCipher(base.PathCipher), prefixKey) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + // TODO(jeff): this shouldn't be necessary if we handled trailing slashes + // appropriately. there's some issues with list. 
+ fullPath := prefix.UnencryptedPath().Raw() + if len(fullPath) > 0 && fullPath[len(fullPath)-1] != '/' { + fullPath += "/" + } + fullPath += itemPath + + path = streams.CreatePath(prefix.Bucket(), paths.NewUnencrypted(fullPath)) + } else { + itemPath = string(item.EncryptedPath) + path = streams.CreatePath(string(item.EncryptedPath), paths.Unencrypted{}) + } + + stream, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, item.EncryptedMetadata, path, db.encStore) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + object, err := objectFromMeta(bucket, itemPath, item, stream, streamMeta) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + list.Items[i] = object + } + + return list, nil +} + +// ListObjectsExtended lists objects in bucket based on the ListOptions +func (db *DB) ListObjectsExtended(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) { + defer mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return storj.ObjectList{}, storj.ErrNoBucket.New("") + } + + if options.Prefix != "" && !strings.HasSuffix(options.Prefix, "/") { + return storj.ObjectList{}, Error.New("prefix should end with slash") + } + + var startAfter string + switch options.Direction { + // TODO for now we are supporting only storj.After + // case storj.Forward: + // // forward lists forwards from cursor, including cursor + // startAfter = keyBefore(options.Cursor) + case storj.After: + // after lists forwards from cursor, without cursor + startAfter = options.Cursor + default: + return storj.ObjectList{}, errClass.New("invalid direction %d", options.Direction) + } + + // TODO: we should let libuplink users be able to determine what metadata fields they request as well + // metaFlags := meta.All + // if db.pathCipher(bucket) == storj.EncNull || db.pathCipher(bucket) == storj.EncNullBase64URL { + // metaFlags = meta.None + // } + + // TODO use flags with listing + // if metaFlags&meta.Size 
!= 0 { + // Calculating the stream's size require also the user-defined metadata, + // where stream store keeps info about the number of segments and their size. + // metaFlags |= meta.UserDefined + // } + + // Remove the trailing slash from list prefix. + // Otherwise, if we the list prefix is `/bob/`, the encrypted list + // prefix results in `enc("")/enc("bob")/enc("")`. This is an incorrect + // encrypted prefix, what we really want is `enc("")/enc("bob")`. + prefix := streams.ParsePath(storj.JoinPaths(bucket.Name, strings.TrimSuffix(options.Prefix, "/"))) + prefixKey, err := encryption.DerivePathKey(prefix.Bucket(), streams.PathForKey(prefix.UnencryptedPath().Raw()), db.encStore) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + encPrefix, err := encryption.EncryptPathWithStoreCipher(prefix.Bucket(), prefix.UnencryptedPath(), db.encStore) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + // We have to encrypt startAfter but only if it doesn't contain a bucket. + // It contains a bucket if and only if the prefix has no bucket. This is why it is a raw + // string instead of a typed string: it's either a bucket or an unencrypted path component + // and that isn't known at compile time. 
+ needsEncryption := prefix.Bucket() != "" + var base *encryption.Base + if needsEncryption { + _, _, base = db.encStore.LookupEncrypted(prefix.Bucket(), encPrefix) + + startAfter, err = encryption.EncryptPathRaw(startAfter, db.pathCipher(base.PathCipher), prefixKey) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + } + + items, more, err := db.metainfo.ListObjects(ctx, metainfo.ListObjectsParams{ + Bucket: []byte(bucket.Name), + EncryptedPrefix: []byte(encPrefix.Raw()), + EncryptedCursor: []byte(startAfter), + Limit: int32(options.Limit), + Recursive: options.Recursive, + }) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + list = storj.ObjectList{ + Bucket: bucket.Name, + Prefix: options.Prefix, + More: more, + Items: make([]storj.Object, 0, len(items)), + } + + for _, item := range items { + var path streams.Path + var itemPath string + + if needsEncryption { + itemPath, err = encryption.DecryptPathRaw(string(item.EncryptedPath), db.pathCipher(base.PathCipher), prefixKey) + if err != nil { + // skip items that cannot be decrypted + if encryption.ErrDecryptFailed.Has(err) { + continue + } + return storj.ObjectList{}, errClass.Wrap(err) + } + + // TODO(jeff): this shouldn't be necessary if we handled trailing slashes + // appropriately. there's some issues with list. 
+ fullPath := prefix.UnencryptedPath().Raw() + if len(fullPath) > 0 && fullPath[len(fullPath)-1] != '/' { + fullPath += "/" + } + fullPath += itemPath + + path = streams.CreatePath(prefix.Bucket(), paths.NewUnencrypted(fullPath)) + } else { + itemPath = string(item.EncryptedPath) + path = streams.CreatePath(string(item.EncryptedPath), paths.Unencrypted{}) + } + + stream, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, item.EncryptedMetadata, path, db.encStore) + if err != nil { + // skip items that cannot be decrypted + if encryption.ErrDecryptFailed.Has(err) { + continue + } + return storj.ObjectList{}, errClass.Wrap(err) + } + + object, err := objectFromMeta(bucket, itemPath, item, stream, streamMeta) + if err != nil { + return storj.ObjectList{}, errClass.Wrap(err) + } + + list.Items = append(list.Items, object) + } + + return list, nil +} + +func (db *DB) pathCipher(pathCipher storj.CipherSuite) storj.CipherSuite { + if db.encStore.EncryptionBypass { + return storj.EncNullBase64URL + } + return pathCipher +} + +type object struct { + fullpath streams.Path + bucket string + encPath paths.Encrypted + lastSegmentMeta segments.Meta + streamInfo *pb.StreamInfo + streamMeta pb.StreamMeta +} + +func (db *DB) getInfo(ctx context.Context, bucket storj.Bucket, path storj.Path) (obj object, info storj.Object, err error) { + defer mon.Task()(&ctx)(&err) + + if bucket.Name == "" { + return object{}, storj.Object{}, storj.ErrNoBucket.New("") + } + + if path == "" { + return object{}, storj.Object{}, storj.ErrNoPath.New("") + } + + encPath, err := encryption.EncryptPathWithStoreCipher(bucket.Name, paths.NewUnencrypted(path), db.encStore) + if err != nil { + return object{}, storj.Object{}, err + } + + objectInfo, err := db.metainfo.GetObject(ctx, metainfo.GetObjectParams{ + Bucket: []byte(bucket.Name), + EncryptedPath: []byte(encPath.Raw()), + }) + if err != nil { + return object{}, storj.Object{}, err + } + + return objectFromInfo(ctx, bucket, path, encPath, 
objectInfo, db.encStore) +} + +func objectFromInfo(ctx context.Context, bucket storj.Bucket, path storj.Path, encPath paths.Encrypted, objectInfo storj.ObjectInfo, encStore *encryption.Store) (object, storj.Object, error) { + if objectInfo.Bucket == "" { // zero objectInfo + return object{}, storj.Object{}, nil + } + + fullpath := streams.CreatePath(bucket.Name, paths.NewUnencrypted(path)) + lastSegmentMeta := segments.Meta{ + Modified: objectInfo.Created, + Expiration: objectInfo.Expires, + Size: objectInfo.Size, + Data: objectInfo.Metadata, + } + + streamInfo, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, lastSegmentMeta.Data, fullpath, encStore) + if err != nil { + return object{}, storj.Object{}, err + } + + info, err := objectStreamFromMeta(bucket, path, objectInfo.StreamID, lastSegmentMeta, streamInfo, streamMeta, objectInfo.Stream.RedundancyScheme) + if err != nil { + return object{}, storj.Object{}, err + } + + return object{ + fullpath: fullpath, + bucket: bucket.Name, + encPath: encPath, + lastSegmentMeta: lastSegmentMeta, + streamInfo: streamInfo, + streamMeta: streamMeta, + }, info, nil +} + +func objectFromMeta(bucket storj.Bucket, path storj.Path, listItem storj.ObjectListItem, stream *pb.StreamInfo, streamMeta pb.StreamMeta) (storj.Object, error) { + object := storj.Object{ + Version: 0, // TODO: + Bucket: bucket, + Path: path, + IsPrefix: listItem.IsPrefix, + + Created: listItem.CreatedAt, // TODO: use correct field + Modified: listItem.CreatedAt, // TODO: use correct field + Expires: listItem.ExpiresAt, + } + + err := updateObjectWithStream(&object, stream, streamMeta) + if err != nil { + return storj.Object{}, err + } + + return object, nil +} + +func objectStreamFromMeta(bucket storj.Bucket, path storj.Path, streamID storj.StreamID, lastSegment segments.Meta, stream *pb.StreamInfo, streamMeta pb.StreamMeta, redundancyScheme storj.RedundancyScheme) (storj.Object, error) { + var nonce storj.Nonce + var encryptedKey 
storj.EncryptedPrivateKey + if streamMeta.LastSegmentMeta != nil { + copy(nonce[:], streamMeta.LastSegmentMeta.KeyNonce) + encryptedKey = streamMeta.LastSegmentMeta.EncryptedKey + } + + rv := storj.Object{ + Version: 0, // TODO: + Bucket: bucket, + Path: path, + IsPrefix: false, + + Created: lastSegment.Modified, // TODO: use correct field + Modified: lastSegment.Modified, // TODO: use correct field + Expires: lastSegment.Expiration, // TODO: use correct field + + Stream: storj.Stream{ + ID: streamID, + + RedundancyScheme: redundancyScheme, + EncryptionParameters: storj.EncryptionParameters{ + CipherSuite: storj.CipherSuite(streamMeta.EncryptionType), + BlockSize: streamMeta.EncryptionBlockSize, + }, + LastSegment: storj.LastSegment{ + EncryptedKeyNonce: nonce, + EncryptedKey: encryptedKey, + }, + }, + } + + err := updateObjectWithStream(&rv, stream, streamMeta) + if err != nil { + return storj.Object{}, err + } + + return rv, nil +} + +func updateObjectWithStream(object *storj.Object, stream *pb.StreamInfo, streamMeta pb.StreamMeta) error { + if stream == nil { + return nil + } + + serializableMeta := pb.SerializableMeta{} + err := pb.Unmarshal(stream.Metadata, &serializableMeta) + if err != nil { + return err + } + + // ensure that the map is not nil + if serializableMeta.UserDefined == nil { + serializableMeta.UserDefined = map[string]string{} + } + + _, found := serializableMeta.UserDefined[contentTypeKey] + if !found && serializableMeta.ContentType != "" { + serializableMeta.UserDefined[contentTypeKey] = serializableMeta.ContentType + } + + segmentCount := numberOfSegments(stream, streamMeta) + object.Metadata = serializableMeta.UserDefined + object.Stream.Size = ((segmentCount - 1) * stream.SegmentsSize) + stream.LastSegmentSize + object.Stream.SegmentCount = segmentCount + object.Stream.FixedSegmentSize = stream.SegmentsSize + object.Stream.LastSegment.Size = stream.LastSegmentSize + + return nil +} + +type mutableObject struct { + db *DB + info storj.Object 
+} + +func (object *mutableObject) Info() storj.Object { return object.info } + +func (object *mutableObject) CreateStream(ctx context.Context) (_ MutableStream, err error) { + defer mon.Task()(&ctx)(&err) + return &mutableStream{ + db: object.db, + info: object.info, + }, nil +} + +func (object *mutableObject) ContinueStream(ctx context.Context) (_ MutableStream, err error) { + defer mon.Task()(&ctx)(&err) + return nil, errors.New("not implemented") +} + +func (object *mutableObject) DeleteStream(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + return errors.New("not implemented") +} + +func (object *mutableObject) Commit(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + _, info, err := object.db.getInfo(ctx, object.info.Bucket, object.info.Path) + object.info = info + return err +} + +func numberOfSegments(stream *pb.StreamInfo, streamMeta pb.StreamMeta) int64 { + if streamMeta.NumberOfSegments > 0 { + return streamMeta.NumberOfSegments + } + return stream.DeprecatedNumberOfSegments +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/paths.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/paths.go new file mode 100644 index 000000000..7562f5baf --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/paths.go @@ -0,0 +1,29 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +// TODO: known issue: +// this is incorrect since there's no good way to get such a path +// since the exact previous key is +// append(previousPrefix(cursor), infinite(0xFF)...) 
+ +// TODO commented until we will decide if we will support direction for objects listing +// func keyBefore(cursor string) string { +// if cursor == "" { +// return "" +// } + +// before := []byte(cursor) +// if before[len(before)-1] == 0 { +// return string(before[:len(before)-1]) +// } +// before[len(before)-1]-- + +// before = append(before, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f) +// return string(before) +// } + +// func keyAfter(cursor string) string { +// return cursor + "\x00" +// } diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go new file mode 100644 index 000000000..b14b3b75a --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/project.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +import ( + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/streams" +) + +// Project implements project management operations +type Project struct { + metainfo metainfo.Client + streams streams.Store + encryptedBlockSize int32 + segmentsSize int64 +} + +// NewProject constructs a *Project +func NewProject(streams streams.Store, encryptedBlockSize int32, segmentsSize int64, metainfo metainfo.Client) *Project { + return &Project{ + metainfo: metainfo, + streams: streams, + encryptedBlockSize: encryptedBlockSize, + segmentsSize: segmentsSize, + } +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/stream.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/stream.go new file mode 100644 index 000000000..ce97de355 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/stream.go @@ -0,0 +1,155 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package kvmetainfo + +import ( + "context" + "errors" + "time" + + "storj.io/common/encryption" + "storj.io/common/paths" + "storj.io/common/pb" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" +) + +var _ ReadOnlyStream = (*readonlyStream)(nil) + +type readonlyStream struct { + db *DB + + info storj.Object +} + +func (stream *readonlyStream) Info() storj.Object { return stream.info } + +func (stream *readonlyStream) SegmentsAt(ctx context.Context, byteOffset int64, limit int64) (infos []storj.Segment, more bool, err error) { + defer mon.Task()(&ctx)(&err) + + if stream.info.FixedSegmentSize <= 0 { + return nil, false, errors.New("not implemented") + } + + index := byteOffset / stream.info.FixedSegmentSize + return stream.Segments(ctx, index, limit) +} + +func (stream *readonlyStream) segment(ctx context.Context, index int64) (segment storj.Segment, err error) { + defer mon.Task()(&ctx)(&err) + + segment = storj.Segment{ + Index: index, + } + + isLastSegment := segment.Index+1 == stream.info.SegmentCount + if isLastSegment { + index = -1 + } + info, limits, err := stream.db.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{ + StreamID: stream.Info().ID, + Position: storj.SegmentPosition{ + Index: int32(index), + }, + }) + if err != nil { + return segment, err + } + + segment.Size = stream.info.Size + segment.EncryptedKeyNonce = info.SegmentEncryption.EncryptedKeyNonce + segment.EncryptedKey = info.SegmentEncryption.EncryptedKey + + streamKey, err := encryption.DeriveContentKey(stream.info.Bucket.Name, paths.NewUnencrypted(stream.info.Path), stream.db.encStore) + if err != nil { + return segment, err + } + + contentKey, err := encryption.DecryptKey(segment.EncryptedKey, stream.info.EncryptionParameters.CipherSuite, streamKey, &segment.EncryptedKeyNonce) + if err != nil { + return segment, err + } + + nonce := new(storj.Nonce) + _, err = encryption.Increment(nonce, segment.Index+1) + if err != nil { + return segment, err + } + + if 
len(info.EncryptedInlineData) != 0 || len(limits) == 0 { + inline, err := encryption.Decrypt(info.EncryptedInlineData, stream.info.EncryptionParameters.CipherSuite, contentKey, nonce) + if err != nil { + return segment, err + } + segment.Inline = inline + } + + return segment, nil +} + +func (stream *readonlyStream) Segments(ctx context.Context, index int64, limit int64) (infos []storj.Segment, more bool, err error) { + defer mon.Task()(&ctx)(&err) + + if index < 0 { + return nil, false, errors.New("invalid argument") + } + if limit <= 0 { + limit = defaultSegmentLimit + } + if index >= stream.info.SegmentCount { + return nil, false, nil + } + + infos = make([]storj.Segment, 0, limit) + for ; index < stream.info.SegmentCount && limit > 0; index++ { + limit-- + segment, err := stream.segment(ctx, index) + if err != nil { + return nil, false, err + } + infos = append(infos, segment) + } + + more = index < stream.info.SegmentCount + return infos, more, nil +} + +type mutableStream struct { + db *DB + info storj.Object +} + +func (stream *mutableStream) BucketName() string { return stream.info.Bucket.Name } +func (stream *mutableStream) Path() string { return stream.info.Path } + +func (stream *mutableStream) Info() storj.Object { return stream.info } + +func (stream *mutableStream) Expires() time.Time { return stream.info.Expires } + +func (stream *mutableStream) Metadata() ([]byte, error) { + if stream.info.ContentType != "" { + if stream.info.Metadata == nil { + stream.info.Metadata = make(map[string]string) + stream.info.Metadata[contentTypeKey] = stream.info.ContentType + } else if _, found := stream.info.Metadata[contentTypeKey]; !found { + stream.info.Metadata[contentTypeKey] = stream.info.ContentType + } + } + if stream.info.Metadata == nil { + return []byte{}, nil + } + return pb.Marshal(&pb.SerializableMeta{ + UserDefined: stream.info.Metadata, + }) +} + +func (stream *mutableStream) AddSegments(ctx context.Context, segments ...storj.Segment) (err error) { + 
defer mon.Task()(&ctx)(&err) + return errors.New("not implemented") +} + +func (stream *mutableStream) UpdateSegments(ctx context.Context, segments ...storj.Segment) (err error) { + defer mon.Task()(&ctx)(&err) + return errors.New("not implemented") +} diff --git a/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go new file mode 100644 index 000000000..19f66cbe6 --- /dev/null +++ b/vendor/storj.io/uplink/private/metainfo/kvmetainfo/temputils.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package kvmetainfo + +import ( + "github.com/zeebo/errs" + + "storj.io/common/encryption" + "storj.io/common/memory" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/segments" + "storj.io/uplink/private/storage/streams" +) + +var ( + // Error is the errs class of SetupProject + Error = errs.Class("SetupProject error") +) + +// SetupProject creates a project with temporary values until we can figure out how to bypass encryption related setup +func SetupProject(m *metainfo.Client) (*Project, error) { + maxBucketMetaSize := 10 * memory.MiB + segment := segments.NewSegmentStore(m, nil) + + // volatile warning: we're setting an encryption key of all zeros for bucket + // metadata, when really the bucket metadata should be stored in a different + // system altogether. 
+ // TODO: https://storjlabs.atlassian.net/browse/V3-1967 + encStore := encryption.NewStore() + encStore.SetDefaultKey(new(storj.Key)) + strms, err := streams.NewStreamStore(m, segment, maxBucketMetaSize.Int64(), encStore, memory.KiB.Int(), storj.EncAESGCM, maxBucketMetaSize.Int(), maxBucketMetaSize.Int64()) + if err != nil { + return nil, Error.New("failed to create streams: %v", err) + } + + return NewProject(strms, memory.KiB.Int32(), 64*memory.MiB.Int64(), *m), nil +} diff --git a/vendor/storj.io/uplink/private/piecestore/buffering.go b/vendor/storj.io/uplink/private/piecestore/buffering.go new file mode 100644 index 000000000..ecc79a375 --- /dev/null +++ b/vendor/storj.io/uplink/private/piecestore/buffering.go @@ -0,0 +1,132 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package piecestore + +import ( + "bufio" + "context" + "sync" + + "github.com/zeebo/errs" + + "storj.io/common/pb" +) + +// BufferedUpload implements buffering for an Upload. +type BufferedUpload struct { + buffer bufio.Writer + upload *Upload +} + +// NewBufferedUpload creates buffered upload with the specified size. +func NewBufferedUpload(upload *Upload, size int) Uploader { + buffered := &BufferedUpload{} + buffered.upload = upload + buffered.buffer = *bufio.NewWriterSize(buffered.upload, size) + return buffered +} + +// Write writes content to the buffer and flushes it to the upload once enough data has been gathered. +func (upload *BufferedUpload) Write(data []byte) (int, error) { + return upload.buffer.Write(data) +} + +// Cancel aborts the upload. +func (upload *BufferedUpload) Cancel(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + return upload.upload.Cancel(ctx) +} + +// Commit flushes any remaining content from buffer and commits the upload. 
+func (upload *BufferedUpload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
+	defer mon.Task()(&ctx)(&err)
+	flushErr := upload.buffer.Flush()
+	piece, closeErr := upload.upload.Commit(ctx)
+	return piece, errs.Combine(flushErr, closeErr)
+}
+
+// BufferedDownload implements buffering for download.
+type BufferedDownload struct {
+	buffer   bufio.Reader
+	download *Download
+}
+
+// NewBufferedDownload creates a buffered download with the specified size.
+func NewBufferedDownload(download *Download, size int) Downloader {
+	buffered := &BufferedDownload{}
+	buffered.download = download
+	buffered.buffer = *bufio.NewReaderSize(buffered.download, size)
+	return buffered
+}
+
+// Read reads from the buffer, downloading in batches once it's empty.
+func (download *BufferedDownload) Read(p []byte) (int, error) {
+	return download.buffer.Read(p)
+}
+
+// Close closes the buffered download.
+func (download *BufferedDownload) Close() error {
+	return download.download.Close()
+}
+
+// GetHashAndLimit gets the download's hash and original order limit.
+func (download *BufferedDownload) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) {
+	return download.download.GetHashAndLimit()
+}
+
+// LockingUpload adds a lock around upload making it safe to use concurrently.
+// TODO: this shouldn't be needed.
+type LockingUpload struct {
+	mu     sync.Mutex
+	upload Uploader
+}
+
+// Write uploads data.
+func (upload *LockingUpload) Write(p []byte) (int, error) {
+	upload.mu.Lock()
+	defer upload.mu.Unlock()
+	return upload.upload.Write(p)
+}
+
+// Cancel aborts the upload.
+func (upload *LockingUpload) Cancel(ctx context.Context) (err error) {
+	defer mon.Task()(&ctx)(&err)
+	upload.mu.Lock()
+	defer upload.mu.Unlock()
+	return upload.upload.Cancel(ctx)
+}
+
+// Commit finishes the upload.
+func (upload *LockingUpload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
+	defer mon.Task()(&ctx)(&err)
+	upload.mu.Lock()
+	defer upload.mu.Unlock()
+	return upload.upload.Commit(ctx)
+}
+
+// LockingDownload adds a lock around download making it safe to use concurrently.
+// TODO: this shouldn't be needed.
+type LockingDownload struct {
+	mu       sync.Mutex
+	download Downloader
+}
+
+// Read downloads content.
+func (download *LockingDownload) Read(p []byte) (int, error) {
+	download.mu.Lock()
+	defer download.mu.Unlock()
+	return download.download.Read(p)
+}
+
+// Close closes the download.
+func (download *LockingDownload) Close() error {
+	download.mu.Lock()
+	defer download.mu.Unlock()
+	return download.download.Close()
+}
+
+// GetHashAndLimit gets the download's hash and original order limit.
+func (download *LockingDownload) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) {
+	return download.download.GetHashAndLimit()
+}
diff --git a/vendor/storj.io/uplink/private/piecestore/client.go b/vendor/storj.io/uplink/private/piecestore/client.go
new file mode 100644
index 000000000..b03a62ce3
--- /dev/null
+++ b/vendor/storj.io/uplink/private/piecestore/client.go
@@ -0,0 +1,121 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package piecestore
+
+import (
+	"context"
+	"io"
+
+	"github.com/zeebo/errs"
+	"go.uber.org/zap"
+
+	"storj.io/common/identity"
+	"storj.io/common/memory"
+	"storj.io/common/pb"
+	"storj.io/common/rpc"
+	"storj.io/common/storj"
+)
+
+// Error is the default error class for piecestore client.
+var Error = errs.Class("piecestore")
+
+// Config defines piecestore client parameters for upload and download.
+type Config struct {
+	UploadBufferSize   int64
+	DownloadBufferSize int64
+
+	InitialStep int64
+	MaximumStep int64
+}
+
+// DefaultConfig are the default params used for upload and download.
+var DefaultConfig = Config{ + UploadBufferSize: 256 * memory.KiB.Int64(), + DownloadBufferSize: 256 * memory.KiB.Int64(), + + InitialStep: 64 * memory.KiB.Int64(), + MaximumStep: 1 * memory.MiB.Int64(), +} + +// Client implements uploading, downloading and deleting content from a piecestore. +type Client struct { + log *zap.Logger + client pb.DRPCPiecestoreClient + conn *rpc.Conn + config Config +} + +// Dial dials the target piecestore endpoint. +func Dial(ctx context.Context, dialer rpc.Dialer, target *pb.Node, log *zap.Logger, config Config) (*Client, error) { + conn, err := dialer.DialNode(ctx, target) + if err != nil { + return nil, Error.Wrap(err) + } + + return &Client{ + log: log, + client: pb.NewDRPCPiecestoreClient(conn), + conn: conn, + config: config, + }, nil +} + +// Delete uses delete order limit to delete a piece on piece store. +// +// DEPRECATED in favor of DeletePieces. +func (client *Client) Delete(ctx context.Context, limit *pb.OrderLimit, privateKey storj.PiecePrivateKey) (err error) { + defer mon.Task()(&ctx)(&err) + _, err = client.client.Delete(ctx, &pb.PieceDeleteRequest{ + Limit: limit, + }) + return Error.Wrap(err) +} + +// DeletePieces deletes a set of pieces. +func (client *Client) DeletePieces(ctx context.Context, ids ...storj.PieceID) (err error) { + defer mon.Task()(&ctx)(&err) + if len(ids) == 0 { + // Avoid RPC calls if no pieces to delete. + return nil + } + _, err = client.client.DeletePieces(ctx, &pb.DeletePiecesRequest{ + PieceIds: ids, + }) + return Error.Wrap(err) +} + +// Retain uses a bloom filter to tell the piece store which pieces to keep. +func (client *Client) Retain(ctx context.Context, req *pb.RetainRequest) (err error) { + defer mon.Task()(&ctx)(&err) + _, err = client.client.Retain(ctx, req) + return Error.Wrap(err) +} + +// Close closes the underlying connection. 
+func (client *Client) Close() error {
+	return client.conn.Close()
+}
+
+// GetPeerIdentity gets the connection's peer identity.
+func (client *Client) GetPeerIdentity() (*identity.PeerIdentity, error) {
+	return client.conn.PeerIdentity()
+}
+
+// nextAllocationStep finds the next trusted step.
+func (client *Client) nextAllocationStep(previous int64) int64 {
+	// TODO: ensure that this is frame independent
+	next := previous * 3 / 2
+	if next > client.config.MaximumStep {
+		next = client.config.MaximumStep
+	}
+	return next
+}
+
+// ignoreEOF is a utility func for ignoring EOF error, when it's not important.
+func ignoreEOF(err error) error {
+	if err == io.EOF {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/storj.io/uplink/private/piecestore/download.go b/vendor/storj.io/uplink/private/piecestore/download.go
new file mode 100644
index 000000000..303bb137d
--- /dev/null
+++ b/vendor/storj.io/uplink/private/piecestore/download.go
@@ -0,0 +1,308 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package piecestore
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/zeebo/errs"
+
+	"storj.io/common/errs2"
+	"storj.io/common/identity"
+	"storj.io/common/pb"
+	"storj.io/common/signing"
+	"storj.io/common/storj"
+)
+
+// Downloader is an interface that can be used for downloading content.
+// It matches the signature of `io.ReadCloser`, with one extra function,
+// GetHashAndLimit(), used for accessing information during GET_REPAIR.
+type Downloader interface {
+	Read([]byte) (int, error)
+	Close() error
+	GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit)
+}
+
+// Download implements downloading from a piecestore.
+type Download struct { + client *Client + limit *pb.OrderLimit + privateKey storj.PiecePrivateKey + peer *identity.PeerIdentity + stream downloadStream + ctx context.Context + + read int64 // how much data we have read so far + allocated int64 // how far have we sent orders + downloaded int64 // how much data have we downloaded + downloadSize int64 // how much do we want to download + + // what is the step we consider to upload + allocationStep int64 + + unread ReadBuffer + + // hash and originLimit are received in the event of a GET_REPAIR + hash *pb.PieceHash + originLimit *pb.OrderLimit + + closed bool + closingError error +} + +type downloadStream interface { + CloseSend() error + Send(*pb.PieceDownloadRequest) error + Recv() (*pb.PieceDownloadResponse, error) +} + +// Download starts a new download using the specified order limit at the specified offset and size. +func (client *Client) Download(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey, offset, size int64) (_ Downloader, err error) { + defer mon.Task()(&ctx)(&err) + + peer, err := client.conn.PeerIdentity() + if err != nil { + return nil, ErrInternal.Wrap(err) + } + + stream, err := client.client.Download(ctx) + if err != nil { + return nil, err + } + + err = stream.Send(&pb.PieceDownloadRequest{ + Limit: limit, + Chunk: &pb.PieceDownloadRequest_Chunk{ + Offset: offset, + ChunkSize: size, + }, + }) + if err != nil { + _, recvErr := stream.Recv() + return nil, ErrProtocol.Wrap(errs.Combine(err, recvErr)) + } + + download := &Download{ + client: client, + limit: limit, + privateKey: piecePrivateKey, + peer: peer, + stream: stream, + ctx: ctx, + + read: 0, + + allocated: 0, + downloaded: 0, + downloadSize: size, + + allocationStep: client.config.InitialStep, + } + + if client.config.DownloadBufferSize <= 0 { + return &LockingDownload{download: download}, nil + } + return &LockingDownload{ + download: NewBufferedDownload(download, int(client.config.DownloadBufferSize)), + }, 
nil +} + +// Read downloads data from the storage node allocating as necessary. +func (client *Download) Read(data []byte) (read int, err error) { + ctx := client.ctx + defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err) + + if client.closed { + return 0, io.ErrClosedPipe + } + + for client.read < client.downloadSize { + // read from buffer + n, err := client.unread.Read(data) + client.read += int64(n) + read += n + + // if we have an error return the error + if err != nil { + return read, err + } + // if we are pending for an error, avoid further requests, but try to finish what's in unread buffer. + if client.unread.Errored() { + return read, nil + } + + // do we need to send a new order to storagenode + if client.allocated-client.downloaded < client.allocationStep { + newAllocation := client.allocationStep + + // have we downloaded more than we have allocated due to a generous storagenode? + if client.allocated-client.downloaded < 0 { + newAllocation += client.downloaded - client.allocated + } + + // ensure we don't allocate more than we intend to read + if client.allocated+newAllocation > client.downloadSize { + newAllocation = client.downloadSize - client.allocated + } + + // send an order + if newAllocation > 0 { + order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{ + SerialNumber: client.limit.SerialNumber, + Amount: newAllocation, + }) + if err != nil { + // we are signing so we shouldn't propagate this into close, + // however we should include this as a read error + client.unread.IncludeError(err) + client.closeWithError(nil) + return read, nil + } + + err = client.stream.Send(&pb.PieceDownloadRequest{ + Order: order, + }) + if err != nil { + // other side doesn't want to talk to us anymore or network went down + client.unread.IncludeError(err) + // if it's a cancellation, then we'll just close with context.Canceled + if errs2.IsCanceled(err) { + client.closeWithError(err) + return read, err + } + // otherwise, 
something else happened and we should try to ask the other side + client.closeAndTryFetchError() + return read, nil + } + + // update our allocation step + client.allocationStep = client.client.nextAllocationStep(client.allocationStep) + } + } + + // we have data, no need to wait for a chunk + if read > 0 { + return read, nil + } + + // we don't have data, wait for a chunk from storage node + response, err := client.stream.Recv() + if response != nil && response.Chunk != nil { + client.downloaded += int64(len(response.Chunk.Data)) + client.unread.Fill(response.Chunk.Data) + } + // This is a GET_REPAIR because we got a piece hash and the original order limit. + if response != nil && response.Hash != nil && response.Limit != nil { + client.hash = response.Hash + client.originLimit = response.Limit + } + + // we may have some data buffered, so we cannot immediately return the error + // we'll queue the error and use the received error as the closing error + if err != nil { + client.unread.IncludeError(err) + client.handleClosingError(err) + } + } + + // all downloaded + if read == 0 { + return 0, io.EOF + } + return read, nil +} + +// handleClosingError should be used for an error that also closed the stream. +func (client *Download) handleClosingError(err error) { + if client.closed { + return + } + client.closed = true + client.closingError = err +} + +// closeWithError is used when we include the err in the closing error and also close the stream. +func (client *Download) closeWithError(err error) { + if client.closed { + return + } + client.closed = true + client.closingError = errs.Combine(err, client.stream.CloseSend()) +} + +// closeAndTryFetchError closes the stream and also tries to fetch the actual error from the stream. 
+func (client *Download) closeAndTryFetchError() { + if client.closed { + return + } + client.closed = true + + client.closingError = client.stream.CloseSend() + if client.closingError == nil || client.closingError == io.EOF { + _, client.closingError = client.stream.Recv() + } +} + +// Close closes the downloading. +func (client *Download) Close() (err error) { + defer func() { + if err != nil { + details := errs.Class(fmt.Sprintf("(Node ID: %s, Piece ID: %s)", client.peer.ID.String(), client.limit.PieceId.String())) + err = details.Wrap(err) + err = Error.Wrap(err) + } + }() + + client.closeWithError(nil) + return client.closingError +} + +// GetHashAndLimit gets the download's hash and original order limit. +func (client *Download) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) { + return client.hash, client.originLimit +} + +// ReadBuffer implements buffered reading with an error. +type ReadBuffer struct { + data []byte + err error +} + +// Error returns an error if it was encountered. +func (buffer *ReadBuffer) Error() error { return buffer.err } + +// Errored returns whether the buffer contains an error. +func (buffer *ReadBuffer) Errored() bool { return buffer.err != nil } + +// Empty checks whether buffer needs to be filled. +func (buffer *ReadBuffer) Empty() bool { + return len(buffer.data) == 0 && buffer.err == nil +} + +// IncludeError adds error at the end of the buffer. +func (buffer *ReadBuffer) IncludeError(err error) { + buffer.err = errs.Combine(buffer.err, err) +} + +// Fill fills the buffer with the specified bytes. +func (buffer *ReadBuffer) Fill(data []byte) { + buffer.data = data +} + +// Read reads from the buffer. 
+func (buffer *ReadBuffer) Read(data []byte) (n int, err error) { + if len(buffer.data) > 0 { + n = copy(data, buffer.data) + buffer.data = buffer.data[n:] + return n, nil + } + + if buffer.err != nil { + return 0, buffer.err + } + + return 0, nil +} diff --git a/vendor/storj.io/uplink/private/piecestore/upload.go b/vendor/storj.io/uplink/private/piecestore/upload.go new file mode 100644 index 000000000..3ade3b783 --- /dev/null +++ b/vendor/storj.io/uplink/private/piecestore/upload.go @@ -0,0 +1,261 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package piecestore + +import ( + "context" + "hash" + "io" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + + "storj.io/common/identity" + "storj.io/common/pb" + "storj.io/common/pkcrypto" + "storj.io/common/signing" + "storj.io/common/storj" + "storj.io/common/sync2" +) + +var mon = monkit.Package() + +// Uploader defines the interface for uploading a piece. +type Uploader interface { + // Write uploads data to the storage node. + Write([]byte) (int, error) + // Cancel cancels the upload. + Cancel(context.Context) error + // Commit finalizes the upload. + Commit(context.Context) (*pb.PieceHash, error) +} + +// Upload implements uploading to the storage node. +type Upload struct { + client *Client + limit *pb.OrderLimit + privateKey storj.PiecePrivateKey + peer *identity.PeerIdentity + stream uploadStream + ctx context.Context + + hash hash.Hash // TODO: use concrete implementation + offset int64 + allocationStep int64 + + // when there's a send error then it will automatically close + finished bool + sendError error +} + +type uploadStream interface { + Context() context.Context + CloseSend() error + Send(*pb.PieceUploadRequest) error + CloseAndRecv() (*pb.PieceUploadResponse, error) +} + +// UploadReader uploads a reader to the storage node. 
+func (client *Client) UploadReader(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey, data io.Reader) (hash *pb.PieceHash, err error) { + // UploadReader is implemented using deprecated Upload until we can get everything + // to switch to UploadReader directly. + + upload, err := client.Upload(ctx, limit, piecePrivateKey) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + err = errs.Combine(err, upload.Cancel(ctx)) + return + } + hash, err = upload.Commit(ctx) + }() + + _, err = sync2.Copy(ctx, upload, data) + return nil, err +} + +// Upload is deprecated and will be removed. Please use UploadReader. +func (client *Client) Upload(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey) (_ Uploader, err error) { + defer mon.Task()(&ctx, "node: "+limit.StorageNodeId.String()[0:8])(&err) + + peer, err := client.conn.PeerIdentity() + if err != nil { + return nil, ErrInternal.Wrap(err) + } + + stream, err := client.client.Upload(ctx) + if err != nil { + return nil, err + } + + err = stream.Send(&pb.PieceUploadRequest{ + Limit: limit, + }) + if err != nil { + _, closeErr := stream.CloseAndRecv() + switch { + case err != io.EOF && closeErr != nil: + err = ErrProtocol.Wrap(errs.Combine(err, closeErr)) + case closeErr != nil: + err = ErrProtocol.Wrap(closeErr) + } + + return nil, err + } + + upload := &Upload{ + client: client, + limit: limit, + privateKey: piecePrivateKey, + peer: peer, + stream: stream, + ctx: ctx, + + hash: pkcrypto.NewHash(), + offset: 0, + allocationStep: client.config.InitialStep, + } + + if client.config.UploadBufferSize <= 0 { + return &LockingUpload{upload: upload}, nil + } + return &LockingUpload{ + upload: NewBufferedUpload(upload, int(client.config.UploadBufferSize)), + }, nil +} + +// Write sends data to the storagenode allocating as necessary. 
+func (client *Upload) Write(data []byte) (written int, err error) { + ctx := client.ctx + defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err) + + if client.finished { + return 0, io.EOF + } + // if we already encountered an error, keep returning it + if client.sendError != nil { + return 0, client.sendError + } + + fullData := data + defer func() { + // write the hash of the data sent to the server + // guaranteed not to return error + _, _ = client.hash.Write(fullData[:written]) + }() + + for len(data) > 0 { + // pick a data chunk to send + var sendData []byte + if client.allocationStep < int64(len(data)) { + sendData, data = data[:client.allocationStep], data[client.allocationStep:] + } else { + sendData, data = data, nil + } + + // create a signed order for the next chunk + order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{ + SerialNumber: client.limit.SerialNumber, + Amount: client.offset + int64(len(sendData)), + }) + if err != nil { + return written, ErrInternal.Wrap(err) + } + + // send signed order + data + err = client.stream.Send(&pb.PieceUploadRequest{ + Order: order, + Chunk: &pb.PieceUploadRequest_Chunk{ + Offset: client.offset, + Data: sendData, + }, + }) + if err != nil { + _, closeErr := client.stream.CloseAndRecv() + switch { + case err != io.EOF && closeErr != nil: + err = ErrProtocol.Wrap(errs.Combine(err, closeErr)) + case closeErr != nil: + err = ErrProtocol.Wrap(closeErr) + } + + client.sendError = err + return written, err + } + + // update our offset + client.offset += int64(len(sendData)) + written += len(sendData) + + // update allocation step, incrementally building trust + client.allocationStep = client.client.nextAllocationStep(client.allocationStep) + } + + return written, nil +} + +// Cancel cancels the uploading. 
+func (client *Upload) Cancel(ctx context.Context) (err error) { + defer mon.Task()(&ctx)(&err) + if client.finished { + return io.EOF + } + client.finished = true + return Error.Wrap(client.stream.CloseSend()) +} + +// Commit finishes uploading by sending the piece-hash and retrieving the piece-hash. +func (client *Upload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) { + defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err) + if client.finished { + return nil, io.EOF + } + client.finished = true + + if client.sendError != nil { + // something happened during sending, try to figure out what exactly + // since sendError was already reported, we don't need to rehandle it. + _, closeErr := client.stream.CloseAndRecv() + return nil, Error.Wrap(closeErr) + } + + // sign the hash for storage node + uplinkHash, err := signing.SignUplinkPieceHash(ctx, client.privateKey, &pb.PieceHash{ + PieceId: client.limit.PieceId, + PieceSize: client.offset, + Hash: client.hash.Sum(nil), + Timestamp: client.limit.OrderCreation, + }) + if err != nil { + // failed to sign, let's close the sending side, no need to wait for a response + closeErr := client.stream.CloseSend() + // closeErr being io.EOF doesn't inform us about anything + return nil, Error.Wrap(errs.Combine(err, ignoreEOF(closeErr))) + } + + // exchange signed piece hashes + // 1. send our piece hash + sendErr := client.stream.Send(&pb.PieceUploadRequest{ + Done: uplinkHash, + }) + + // 2. 
wait for a piece hash as a response + response, closeErr := client.stream.CloseAndRecv() + if response == nil || response.Done == nil { + // combine all the errors from before + // sendErr is io.EOF when failed to send, so don't care + // closeErr is io.EOF when storage node closed before sending us a response + return nil, errs.Combine(ErrProtocol.New("expected piece hash"), ignoreEOF(sendErr), ignoreEOF(closeErr)) + } + + // verification + verifyErr := client.client.VerifyPieceHash(client.stream.Context(), client.peer, client.limit, response.Done, uplinkHash.Hash) + + // combine all the errors from before + // sendErr is io.EOF when we failed to send + // closeErr is io.EOF when storage node closed properly + return response.Done, errs.Combine(verifyErr, ignoreEOF(sendErr), ignoreEOF(closeErr)) +} diff --git a/vendor/storj.io/uplink/private/piecestore/verification.go b/vendor/storj.io/uplink/private/piecestore/verification.go new file mode 100644 index 000000000..c8cae8455 --- /dev/null +++ b/vendor/storj.io/uplink/private/piecestore/verification.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package piecestore + +import ( + "bytes" + "context" + "time" + + "github.com/zeebo/errs" + + "storj.io/common/identity" + "storj.io/common/pb" + "storj.io/common/signing" +) + +const pieceHashExpiration = 24 * time.Hour + +var ( + // ErrInternal is an error class for internal errors. + ErrInternal = errs.Class("internal") + // ErrProtocol is an error class for unexpected protocol sequence. + ErrProtocol = errs.Class("protocol") + // ErrVerifyUntrusted is an error in case there is a trust issue. + ErrVerifyUntrusted = errs.Class("untrusted") + // ErrStorageNodeInvalidResponse is an error when a storage node returns a response with invalid data + ErrStorageNodeInvalidResponse = errs.Class("storage node has returned an invalid response") +) + +// VerifyPieceHash verifies piece hash which is sent by peer. 
+func (client *Client) VerifyPieceHash(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit, hash *pb.PieceHash, expectedHash []byte) (err error) { + defer mon.Task()(&ctx)(&err) + if peer == nil || limit == nil || hash == nil || len(expectedHash) == 0 { + return ErrProtocol.New("invalid arguments") + } + if limit.PieceId != hash.PieceId { + return ErrProtocol.New("piece id changed") // TODO: report rpc status bad message + } + if !bytes.Equal(hash.Hash, expectedHash) { + return ErrVerifyUntrusted.New("hashes don't match") // TODO: report rpc status bad message + } + + if err := signing.VerifyPieceHashSignature(ctx, signing.SigneeFromPeerIdentity(peer), hash); err != nil { + return ErrVerifyUntrusted.New("invalid hash signature: %v", err) // TODO: report rpc status bad message + } + + if hash.Timestamp.Before(time.Now().Add(-pieceHashExpiration)) { + return ErrStorageNodeInvalidResponse.New("piece has timestamp is too old (%v). Required to be not older than %s", + hash.Timestamp, pieceHashExpiration, + ) + } + + return nil +} diff --git a/vendor/storj.io/uplink/private/storage/segments/common.go b/vendor/storj.io/uplink/private/storage/segments/common.go new file mode 100644 index 000000000..dacf74239 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/segments/common.go @@ -0,0 +1,11 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package segments + +import ( + "github.com/zeebo/errs" +) + +// Error is the errs class of standard segment errors +var Error = errs.Class("segment error") diff --git a/vendor/storj.io/uplink/private/storage/segments/peek.go b/vendor/storj.io/uplink/private/storage/segments/peek.go new file mode 100644 index 000000000..6018752ad --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/segments/peek.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package segments + +import "io" + +// PeekThresholdReader allows a check to see if the size of a given reader +// exceeds the maximum inline segment size or not. +type PeekThresholdReader struct { + r io.Reader + thresholdBuf []byte + isLargerCalled bool + readCalled bool +} + +// NewPeekThresholdReader creates a new instance of PeekThresholdReader +func NewPeekThresholdReader(r io.Reader) (pt *PeekThresholdReader) { + return &PeekThresholdReader{r: r} +} + +// Read initially reads bytes from the internal buffer, then continues +// reading from the wrapped data reader. The number of bytes read `n` +// is returned. +func (pt *PeekThresholdReader) Read(p []byte) (n int, err error) { + pt.readCalled = true + + if len(pt.thresholdBuf) == 0 { + return pt.r.Read(p) + } + + n = copy(p, pt.thresholdBuf) + pt.thresholdBuf = pt.thresholdBuf[n:] + return n, nil +} + +// IsLargerThan returns a bool to determine whether a reader's size +// is larger than the given threshold or not. +func (pt *PeekThresholdReader) IsLargerThan(thresholdSize int) (bool, error) { + if pt.isLargerCalled { + return false, Error.New("IsLargerThan can't be called more than once") + } + if pt.readCalled { + return false, Error.New("IsLargerThan can't be called after Read has been called") + } + pt.isLargerCalled = true + buf := make([]byte, thresholdSize+1) + n, err := io.ReadFull(pt.r, buf) + pt.thresholdBuf = buf[:n] + if err == io.EOF || err == io.ErrUnexpectedEOF { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/storj.io/uplink/private/storage/segments/size.go b/vendor/storj.io/uplink/private/storage/segments/size.go new file mode 100644 index 000000000..90208f375 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/segments/size.go @@ -0,0 +1,29 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package segments + +import ( + "io" +) + +// SizedReader allows to check the total number of bytes read so far. +type SizedReader struct { + r io.Reader + size int64 +} + +// SizeReader create a new instance of SizedReader. +func SizeReader(r io.Reader) *SizedReader { + return &SizedReader{r: r} +} + +// Read implements io.Reader.Read +func (r *SizedReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + r.size += int64(n) + return n, err +} + +// Size returns the total number of bytes read so far. +func (r *SizedReader) Size() int64 { return r.size } diff --git a/vendor/storj.io/uplink/private/storage/segments/store.go b/vendor/storj.io/uplink/private/storage/segments/store.go new file mode 100644 index 000000000..e7701f84e --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/segments/store.go @@ -0,0 +1,131 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package segments + +import ( + "context" + "io" + "math/rand" + "sync" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/vivint/infectious" + + "storj.io/common/pb" + "storj.io/common/ranger" + "storj.io/common/storj" + "storj.io/uplink/private/ecclient" + "storj.io/uplink/private/eestream" + "storj.io/uplink/private/metainfo" +) + +var ( + mon = monkit.Package() +) + +// Meta info about a segment +type Meta struct { + Modified time.Time + Expiration time.Time + Size int64 + Data []byte +} + +// Store for segments +type Store interface { + // Ranger creates a ranger for downloading erasure codes from piece store nodes. 
+ Ranger(ctx context.Context, info storj.SegmentDownloadInfo, limits []*pb.AddressedOrderLimit, objectRS storj.RedundancyScheme) (ranger.Ranger, error) + Put(ctx context.Context, data io.Reader, expiration time.Time, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy) (_ []*pb.SegmentPieceUploadResult, size int64, err error) +} + +type segmentStore struct { + metainfo *metainfo.Client + ec ecclient.Client + rngMu sync.Mutex + rng *rand.Rand +} + +// NewSegmentStore creates a new instance of segmentStore +func NewSegmentStore(metainfo *metainfo.Client, ec ecclient.Client) Store { + return &segmentStore{ + metainfo: metainfo, + ec: ec, + rng: rand.New(rand.NewSource(time.Now().UnixNano())), + } +} + +// Put uploads a segment to an erasure code client +func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.Time, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy) (_ []*pb.SegmentPieceUploadResult, size int64, err error) { + defer mon.Task()(&ctx)(&err) + + sizedReader := SizeReader(NewPeekThresholdReader(data)) + successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, piecePrivateKey, rs, sizedReader, expiration) + if err != nil { + return nil, size, Error.Wrap(err) + } + + uploadResults := make([]*pb.SegmentPieceUploadResult, 0, len(successfulNodes)) + for i := range successfulNodes { + if successfulNodes[i] == nil { + continue + } + uploadResults = append(uploadResults, &pb.SegmentPieceUploadResult{ + PieceNum: int32(i), + NodeId: successfulNodes[i].Id, + Hash: successfulHashes[i], + }) + } + + if l := len(uploadResults); l < rs.OptimalThreshold() { + return nil, size, Error.New("uploaded results (%d) are below the optimal threshold (%d)", l, rs.OptimalThreshold()) + } + + return uploadResults, sizedReader.Size(), nil +} + +// Ranger creates a ranger for downloading erasure codes from piece store nodes. 
+func (s *segmentStore) Ranger( + ctx context.Context, info storj.SegmentDownloadInfo, limits []*pb.AddressedOrderLimit, objectRS storj.RedundancyScheme, +) (rr ranger.Ranger, err error) { + defer mon.Task()(&ctx, info, limits, objectRS)(&err) + + // no order limits also means its inline segment + if len(info.EncryptedInlineData) != 0 || len(limits) == 0 { + return ranger.ByteRanger(info.EncryptedInlineData), nil + } + + needed := objectRS.DownloadNodes() + selected := make([]*pb.AddressedOrderLimit, len(limits)) + s.rngMu.Lock() + perm := s.rng.Perm(len(limits)) + s.rngMu.Unlock() + + for _, i := range perm { + limit := limits[i] + if limit == nil { + continue + } + + selected[i] = limit + + needed-- + if needed <= 0 { + break + } + } + + fc, err := infectious.NewFEC(int(objectRS.RequiredShares), int(objectRS.TotalShares)) + if err != nil { + return nil, err + } + es := eestream.NewRSScheme(fc, int(objectRS.ShareSize)) + redundancy, err := eestream.NewRedundancyStrategy(es, int(objectRS.RepairShares), int(objectRS.OptimalShares)) + if err != nil { + return nil, err + } + + rr, err = s.ec.Get(ctx, selected, info.PiecePrivateKey, redundancy, info.Size) + return rr, Error.Wrap(err) +} diff --git a/vendor/storj.io/uplink/private/storage/streams/eof.go b/vendor/storj.io/uplink/private/storage/streams/eof.go new file mode 100644 index 000000000..a68c43ca6 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/streams/eof.go @@ -0,0 +1,36 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package streams + +import "io" + +// EOFReader holds reader and status of EOF +type EOFReader struct { + reader io.Reader + eof bool + err error +} + +// NewEOFReader keeps track of the state, has the internal reader reached EOF +func NewEOFReader(r io.Reader) *EOFReader { + return &EOFReader{reader: r} +} + +func (r *EOFReader) Read(p []byte) (n int, err error) { + n, err = r.reader.Read(p) + if err == io.EOF { + r.eof = true + } else if err != nil && r.err == nil { + r.err = err + } + return n, err +} + +func (r *EOFReader) isEOF() bool { + return r.eof +} + +func (r *EOFReader) hasError() bool { + return r.err != nil +} diff --git a/vendor/storj.io/uplink/private/storage/streams/path.go b/vendor/storj.io/uplink/private/storage/streams/path.go new file mode 100644 index 000000000..d041a79d4 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/streams/path.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package streams + +import ( + "strings" + + "storj.io/common/paths" + "storj.io/common/storj" +) + +// Path is a representation of an object path within a bucket +type Path struct { + bucket string + unencPath paths.Unencrypted + raw []byte +} + +// Bucket returns the bucket part of the path. +func (p Path) Bucket() string { return p.bucket } + +// UnencryptedPath returns the unencrypted path part of the path. +func (p Path) UnencryptedPath() paths.Unencrypted { return p.unencPath } + +// Raw returns the raw data in the path. +func (p Path) Raw() []byte { return append([]byte(nil), p.raw...) } + +// String returns the string form of the raw data in the path. +func (p Path) String() string { return string(p.raw) } + +// ParsePath returns a new Path with the given raw bytes. +func ParsePath(raw storj.Path) (path Path) { + // A path may contain a bucket and an unencrypted path. 
+ parts := strings.SplitN(raw, "/", 2) + path.bucket = parts[0] + if len(parts) > 1 { + path.unencPath = paths.NewUnencrypted(parts[1]) + } + path.raw = []byte(raw) + return path +} + +// CreatePath will create a Path for the provided information. +func CreatePath(bucket string, unencPath paths.Unencrypted) (path Path) { + path.bucket = bucket + path.unencPath = unencPath + + path.raw = append(path.raw, bucket...) + if unencPath.Valid() { + path.raw = append(path.raw, '/') + path.raw = append(path.raw, unencPath.Raw()...) + } + + return path +} + +// PathForKey removes the trailing `/` from the raw path, which is required so +// the derived key matches the final list path (which also has the trailing +// encrypted `/` part of the path removed). +func PathForKey(raw string) paths.Unencrypted { + return paths.NewUnencrypted(strings.TrimSuffix(raw, "/")) +} diff --git a/vendor/storj.io/uplink/private/storage/streams/shim.go b/vendor/storj.io/uplink/private/storage/streams/shim.go new file mode 100644 index 000000000..a08df0265 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/streams/shim.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package streams + +import ( + "context" + "io" + "time" + + "storj.io/common/encryption" + "storj.io/common/ranger" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/segments" +) + +// Metadata interface returns the latest metadata for an object. 
+type Metadata interface { + Metadata() ([]byte, error) +} + +// Store interface methods for streams to satisfy to be a store +type Store interface { + Get(ctx context.Context, path storj.Path, object storj.Object) (ranger.Ranger, error) + Put(ctx context.Context, path storj.Path, data io.Reader, metadata Metadata, expiration time.Time) (Meta, error) + Delete(ctx context.Context, path storj.Path) (storj.ObjectInfo, error) +} + +type shimStore struct { + store typedStore +} + +// NewStreamStore constructs a Store. +func NewStreamStore(metainfo *metainfo.Client, segments segments.Store, segmentSize int64, encStore *encryption.Store, encBlockSize int, cipher storj.CipherSuite, inlineThreshold int, maxEncryptedSegmentSize int64) (Store, error) { + typedStore, err := newTypedStreamStore(metainfo, segments, segmentSize, encStore, encBlockSize, cipher, inlineThreshold, maxEncryptedSegmentSize) + if err != nil { + return nil, err + } + return &shimStore{store: typedStore}, nil +} + +// Get parses the passed in path and dispatches to the typed store. +func (s *shimStore) Get(ctx context.Context, path storj.Path, object storj.Object) (_ ranger.Ranger, err error) { + defer mon.Task()(&ctx)(&err) + + return s.store.Get(ctx, ParsePath(path), object) +} + +// Put parses the passed in path and dispatches to the typed store. +func (s *shimStore) Put(ctx context.Context, path storj.Path, data io.Reader, metadata Metadata, expiration time.Time) (_ Meta, err error) { + defer mon.Task()(&ctx)(&err) + + return s.store.Put(ctx, ParsePath(path), data, metadata, expiration) +} + +// Delete parses the passed in path and dispatches to the typed store. 
+func (s *shimStore) Delete(ctx context.Context, path storj.Path) (_ storj.ObjectInfo, err error) { + defer mon.Task()(&ctx)(&err) + + return s.store.Delete(ctx, ParsePath(path)) +} diff --git a/vendor/storj.io/uplink/private/storage/streams/size.go b/vendor/storj.io/uplink/private/storage/streams/size.go new file mode 100644 index 000000000..5776a442c --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/streams/size.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package streams + +import "io" + +// SizeReader holds reader and size read so far +type SizeReader struct { + reader io.Reader + size int64 +} + +// NewSizeReader keeps track of how much bytes are read from the reader +func NewSizeReader(r io.Reader) *SizeReader { + return &SizeReader{reader: r} +} + +func (r *SizeReader) Read(p []byte) (n int, err error) { + n, err = r.reader.Read(p) + r.size += int64(n) + return n, err +} + +// Size returns the number of bytes read so far +func (r *SizeReader) Size() int64 { + return r.size +} diff --git a/vendor/storj.io/uplink/private/storage/streams/store.go b/vendor/storj.io/uplink/private/storage/streams/store.go new file mode 100644 index 000000000..a10f47c19 --- /dev/null +++ b/vendor/storj.io/uplink/private/storage/streams/store.go @@ -0,0 +1,585 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package streams + +import ( + "context" + "crypto/rand" + "io" + "io/ioutil" + "time" + + "github.com/spacemonkeygo/monkit/v3" + "github.com/zeebo/errs" + "go.uber.org/zap" + + "storj.io/common/encryption" + "storj.io/common/pb" + "storj.io/common/ranger" + "storj.io/common/storj" + "storj.io/uplink/private/eestream" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/storage/segments" +) + +var mon = monkit.Package() + +// Meta info about a stream +type Meta struct { + Modified time.Time + Expiration time.Time + Size int64 + Data []byte +} + +// Store interface methods for streams to satisfy to be a store +type typedStore interface { + Get(ctx context.Context, path Path, object storj.Object) (ranger.Ranger, error) + Put(ctx context.Context, path Path, data io.Reader, metadata Metadata, expiration time.Time) (Meta, error) + Delete(ctx context.Context, path Path) (storj.ObjectInfo, error) +} + +// streamStore is a store for streams. It implements typedStore as part of an ongoing migration +// to use typed paths. See the shim for the store that the rest of the world interacts with. +type streamStore struct { + metainfo *metainfo.Client + segments segments.Store + segmentSize int64 + encStore *encryption.Store + encBlockSize int + cipher storj.CipherSuite + inlineThreshold int + maxEncryptedSegmentSize int64 +} + +// newTypedStreamStore constructs a typedStore backed by a streamStore. 
+func newTypedStreamStore(metainfo *metainfo.Client, segments segments.Store, segmentSize int64, encStore *encryption.Store, encBlockSize int, cipher storj.CipherSuite, inlineThreshold int, maxEncryptedSegmentSize int64) (typedStore, error) { + if segmentSize <= 0 { + return nil, errs.New("segment size must be larger than 0") + } + if encBlockSize <= 0 { + return nil, errs.New("encryption block size must be larger than 0") + } + + return &streamStore{ + metainfo: metainfo, + segments: segments, + segmentSize: segmentSize, + encStore: encStore, + encBlockSize: encBlockSize, + cipher: cipher, + inlineThreshold: inlineThreshold, + maxEncryptedSegmentSize: maxEncryptedSegmentSize, + }, nil +} + +// Put breaks up data as it comes in into s.segmentSize length pieces, then +// store the first piece at s0/, second piece at s1/, and the +// *last* piece at l/. Store the given metadata, along with the number +// of segments, in a new protobuf, in the metadata of l/. +// +// If there is an error, it cleans up any uploaded segment before returning. 
+func (s *streamStore) Put(ctx context.Context, path Path, data io.Reader, metadata Metadata, expiration time.Time) (_ Meta, err error) { + defer mon.Task()(&ctx)(&err) + derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore) + if err != nil { + return Meta{}, err + } + encPath, err := encryption.EncryptPathWithStoreCipher(path.Bucket(), path.UnencryptedPath(), s.encStore) + if err != nil { + return Meta{}, err + } + + beginObjectReq := &metainfo.BeginObjectParams{ + Bucket: []byte(path.Bucket()), + EncryptedPath: []byte(encPath.Raw()), + ExpiresAt: expiration, + } + + var ( + streamID storj.StreamID + ) + defer func() { + if err != nil { + s.cancelHandler(context.Background(), path) + return + } + + select { + case <-ctx.Done(): + s.cancelHandler(context.Background(), path) + default: + } + }() + + var ( + currentSegment int64 + contentKey storj.Key + streamSize int64 + lastSegmentSize int64 + encryptedKey []byte + keyNonce storj.Nonce + rs eestream.RedundancyStrategy + + requestsToBatch = make([]metainfo.BatchItem, 0, 2) + ) + + eofReader := NewEOFReader(data) + for !eofReader.isEOF() && !eofReader.hasError() { + // generate random key for encrypting the segment's content + _, err := rand.Read(contentKey[:]) + if err != nil { + return Meta{}, err + } + + // Initialize the content nonce with the current total segment incremented + // by 1 because at this moment the next segment has not been already + // uploaded. + // The increment by 1 is to avoid nonce reuse with the metadata encryption, + // which is encrypted with the zero nonce. 
+ contentNonce := storj.Nonce{} + _, err = encryption.Increment(&contentNonce, currentSegment+1) + if err != nil { + return Meta{}, err + } + + // generate random nonce for encrypting the content key + _, err = rand.Read(keyNonce[:]) + if err != nil { + return Meta{}, err + } + + encryptedKey, err = encryption.EncryptKey(&contentKey, s.cipher, derivedKey, &keyNonce) + if err != nil { + return Meta{}, err + } + + sizeReader := NewSizeReader(eofReader) + segmentReader := io.LimitReader(sizeReader, s.segmentSize) + peekReader := segments.NewPeekThresholdReader(segmentReader) + // If the data is larger than the inline threshold size, then it will be a remote segment + isRemote, err := peekReader.IsLargerThan(s.inlineThreshold) + if err != nil { + return Meta{}, err + } + + segmentEncryption := storj.SegmentEncryption{} + if s.cipher != storj.EncNull { + segmentEncryption = storj.SegmentEncryption{ + EncryptedKey: encryptedKey, + EncryptedKeyNonce: keyNonce, + } + } + + if isRemote { + encrypter, err := encryption.NewEncrypter(s.cipher, &contentKey, &contentNonce, s.encBlockSize) + if err != nil { + return Meta{}, err + } + + paddedReader := encryption.PadReader(ioutil.NopCloser(peekReader), encrypter.InBlockSize()) + transformedReader := encryption.TransformReader(paddedReader, encrypter, 0) + + beginSegment := &metainfo.BeginSegmentParams{ + MaxOrderLimit: s.maxEncryptedSegmentSize, + Position: storj.SegmentPosition{ + Index: int32(currentSegment), + }, + } + + var responses []metainfo.BatchResponse + if currentSegment == 0 { + responses, err = s.metainfo.Batch(ctx, beginObjectReq, beginSegment) + if err != nil { + return Meta{}, err + } + objResponse, err := responses[0].BeginObject() + if err != nil { + return Meta{}, err + } + streamID = objResponse.StreamID + rs = objResponse.RedundancyStrategy + } else { + beginSegment.StreamID = streamID + responses, err = s.metainfo.Batch(ctx, append(requestsToBatch, beginSegment)...) 
+ requestsToBatch = requestsToBatch[:0] + if err != nil { + return Meta{}, err + } + } + + segResponse, err := responses[1].BeginSegment() + if err != nil { + return Meta{}, err + } + segmentID := segResponse.SegmentID + limits := segResponse.Limits + piecePrivateKey := segResponse.PiecePrivateKey + + uploadResults, size, err := s.segments.Put(ctx, transformedReader, expiration, limits, piecePrivateKey, rs) + if err != nil { + return Meta{}, err + } + + requestsToBatch = append(requestsToBatch, &metainfo.CommitSegmentParams{ + SegmentID: segmentID, + SizeEncryptedData: size, + Encryption: segmentEncryption, + UploadResult: uploadResults, + }) + } else { + data, err := ioutil.ReadAll(peekReader) + if err != nil { + return Meta{}, err + } + cipherData, err := encryption.Encrypt(data, s.cipher, &contentKey, &contentNonce) + if err != nil { + return Meta{}, err + } + + makeInlineSegment := &metainfo.MakeInlineSegmentParams{ + Position: storj.SegmentPosition{ + Index: int32(currentSegment), + }, + Encryption: segmentEncryption, + EncryptedInlineData: cipherData, + } + if currentSegment == 0 { + responses, err := s.metainfo.Batch(ctx, beginObjectReq, makeInlineSegment) + if err != nil { + return Meta{}, err + } + objResponse, err := responses[0].BeginObject() + if err != nil { + return Meta{}, err + } + streamID = objResponse.StreamID + } else { + makeInlineSegment.StreamID = streamID + requestsToBatch = append(requestsToBatch, makeInlineSegment) + } + } + + lastSegmentSize = sizeReader.Size() + streamSize += lastSegmentSize + currentSegment++ + } + + totalSegments := currentSegment + + if eofReader.hasError() { + return Meta{}, eofReader.err + } + + metadataBytes, err := metadata.Metadata() + if err != nil { + return Meta{}, err + } + + streamInfo, err := pb.Marshal(&pb.StreamInfo{ + DeprecatedNumberOfSegments: totalSegments, + SegmentsSize: s.segmentSize, + LastSegmentSize: lastSegmentSize, + Metadata: metadataBytes, + }) + if err != nil { + return Meta{}, err + } + + 
// encrypt metadata with the content encryption key and zero nonce + encryptedStreamInfo, err := encryption.Encrypt(streamInfo, s.cipher, &contentKey, &storj.Nonce{}) + if err != nil { + return Meta{}, err + } + + streamMeta := pb.StreamMeta{ + NumberOfSegments: totalSegments, + EncryptedStreamInfo: encryptedStreamInfo, + EncryptionType: int32(s.cipher), + EncryptionBlockSize: int32(s.encBlockSize), + } + + if s.cipher != storj.EncNull { + streamMeta.LastSegmentMeta = &pb.SegmentMeta{ + EncryptedKey: encryptedKey, + KeyNonce: keyNonce[:], + } + } + + objectMetadata, err := pb.Marshal(&streamMeta) + if err != nil { + return Meta{}, err + } + + commitObject := metainfo.CommitObjectParams{ + StreamID: streamID, + EncryptedMetadata: objectMetadata, + } + if len(requestsToBatch) > 0 { + _, err = s.metainfo.Batch(ctx, append(requestsToBatch, &commitObject)...) + } else { + err = s.metainfo.CommitObject(ctx, commitObject) + } + if err != nil { + return Meta{}, err + } + + satStreamID := &pb.SatStreamID{} + err = pb.Unmarshal(streamID, satStreamID) + if err != nil { + return Meta{}, err + } + + resultMeta := Meta{ + Modified: satStreamID.CreationDate, + Expiration: expiration, + Size: streamSize, + Data: metadataBytes, + } + + return resultMeta, nil +} + +// Get returns a ranger that knows what the overall size is (from l/) +// and then returns the appropriate data from segments s0/, s1/, +// ..., l/. 
+func (s *streamStore) Get(ctx context.Context, path Path, object storj.Object) (rr ranger.Ranger, err error) { + defer mon.Task()(&ctx)(&err) + + info, limits, err := s.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{ + StreamID: object.ID, + Position: storj.SegmentPosition{ + Index: -1, // Request the last segment + }, + }) + if err != nil { + return nil, err + } + + lastSegmentRanger, err := s.segments.Ranger(ctx, info, limits, object.RedundancyScheme) + if err != nil { + return nil, err + } + + derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore) + if err != nil { + return nil, err + } + + var rangers []ranger.Ranger + for i := int64(0); i < object.SegmentCount-1; i++ { + var contentNonce storj.Nonce + _, err = encryption.Increment(&contentNonce, i+1) + if err != nil { + return nil, err + } + + rangers = append(rangers, &lazySegmentRanger{ + metainfo: s.metainfo, + segments: s.segments, + streamID: object.ID, + segmentIndex: int32(i), + rs: object.RedundancyScheme, + size: object.FixedSegmentSize, + derivedKey: derivedKey, + startingNonce: &contentNonce, + encBlockSize: int(object.EncryptionParameters.BlockSize), + cipher: object.CipherSuite, + }) + } + + var contentNonce storj.Nonce + _, err = encryption.Increment(&contentNonce, object.SegmentCount) + if err != nil { + return nil, err + } + + decryptedLastSegmentRanger, err := decryptRanger( + ctx, + lastSegmentRanger, + object.LastSegment.Size, + object.CipherSuite, + derivedKey, + info.SegmentEncryption.EncryptedKey, + &info.SegmentEncryption.EncryptedKeyNonce, + &contentNonce, + int(object.EncryptionParameters.BlockSize), + ) + if err != nil { + return nil, err + } + + rangers = append(rangers, decryptedLastSegmentRanger) + return ranger.Concat(rangers...), nil +} + +// Delete all the segments, with the last one last +func (s *streamStore) Delete(ctx context.Context, path Path) (_ storj.ObjectInfo, err error) { + defer mon.Task()(&ctx)(&err) + + 
encPath, err := encryption.EncryptPathWithStoreCipher(path.Bucket(), path.UnencryptedPath(), s.encStore) + if err != nil { + return storj.ObjectInfo{}, err + } + + _, object, err := s.metainfo.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{ + Bucket: []byte(path.Bucket()), + EncryptedPath: []byte(encPath.Raw()), + }) + return object, err +} + +// ListItem is a single item in a listing +type ListItem struct { + Path string + Meta Meta + IsPrefix bool +} + +type lazySegmentRanger struct { + ranger ranger.Ranger + metainfo *metainfo.Client + segments segments.Store + streamID storj.StreamID + segmentIndex int32 + rs storj.RedundancyScheme + size int64 + derivedKey *storj.Key + startingNonce *storj.Nonce + encBlockSize int + cipher storj.CipherSuite +} + +// Size implements Ranger.Size. +func (lr *lazySegmentRanger) Size() int64 { + return lr.size +} + +// Range implements Ranger.Range to be lazily connected. +func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) { + defer mon.Task()(&ctx)(&err) + if lr.ranger == nil { + info, limits, err := lr.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{ + StreamID: lr.streamID, + Position: storj.SegmentPosition{ + Index: lr.segmentIndex, + }, + }) + if err != nil { + return nil, err + } + + rr, err := lr.segments.Ranger(ctx, info, limits, lr.rs) + if err != nil { + return nil, err + } + + encryptedKey, keyNonce := info.SegmentEncryption.EncryptedKey, info.SegmentEncryption.EncryptedKeyNonce + lr.ranger, err = decryptRanger(ctx, rr, lr.size, lr.cipher, lr.derivedKey, encryptedKey, &keyNonce, lr.startingNonce, lr.encBlockSize) + if err != nil { + return nil, err + } + } + return lr.ranger.Range(ctx, offset, length) +} + +// decryptRanger returns a decrypted ranger of the given rr ranger. 
+func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, cipher storj.CipherSuite, derivedKey *storj.Key, encryptedKey storj.EncryptedPrivateKey, encryptedKeyNonce, startingNonce *storj.Nonce, encBlockSize int) (decrypted ranger.Ranger, err error) { + defer mon.Task()(&ctx)(&err) + contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, encryptedKeyNonce) + if err != nil { + return nil, err + } + + decrypter, err := encryption.NewDecrypter(cipher, contentKey, startingNonce, encBlockSize) + if err != nil { + return nil, err + } + + var rd ranger.Ranger + if rr.Size()%int64(decrypter.InBlockSize()) != 0 { + reader, err := rr.Range(ctx, 0, rr.Size()) + if err != nil { + return nil, err + } + defer func() { err = errs.Combine(err, reader.Close()) }() + cipherData, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + data, err := encryption.Decrypt(cipherData, cipher, contentKey, startingNonce) + if err != nil { + return nil, err + } + return ranger.ByteRanger(data), nil + } + + rd, err = encryption.Transform(rr, decrypter) + if err != nil { + return nil, err + } + return encryption.Unpad(rd, int(rd.Size()-decryptedSize)) +} + +// CancelHandler handles clean up of segments on receiving CTRL+C +func (s *streamStore) cancelHandler(ctx context.Context, path Path) { + defer mon.Task()(&ctx)(nil) + + // satellite deletes now from 0 to l so we can just use BeginDeleteObject + _, err := s.Delete(ctx, path) + if err != nil { + zap.L().Warn("Failed deleting object", zap.Stringer("path", path), zap.Error(err)) + } +} + +func getEncryptedKeyAndNonce(m *pb.SegmentMeta) (storj.EncryptedPrivateKey, *storj.Nonce) { + if m == nil { + return nil, nil + } + + var nonce storj.Nonce + copy(nonce[:], m.KeyNonce) + + return m.EncryptedKey, &nonce +} + +// TypedDecryptStreamInfo decrypts stream info +func TypedDecryptStreamInfo(ctx context.Context, streamMetaBytes []byte, path Path, encStore *encryption.Store) ( + _ *pb.StreamInfo, 
streamMeta pb.StreamMeta, err error) { + defer mon.Task()(&ctx)(&err) + + err = pb.Unmarshal(streamMetaBytes, &streamMeta) + if err != nil { + return nil, pb.StreamMeta{}, err + } + + if encStore.EncryptionBypass { + return nil, streamMeta, nil + } + + derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), encStore) + if err != nil { + return nil, pb.StreamMeta{}, err + } + + cipher := storj.CipherSuite(streamMeta.EncryptionType) + encryptedKey, keyNonce := getEncryptedKeyAndNonce(streamMeta.LastSegmentMeta) + contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, keyNonce) + if err != nil { + return nil, pb.StreamMeta{}, err + } + + // decrypt metadata with the content encryption key and zero nonce + streamInfo, err := encryption.Decrypt(streamMeta.EncryptedStreamInfo, cipher, contentKey, &storj.Nonce{}) + if err != nil { + return nil, pb.StreamMeta{}, err + } + + var stream pb.StreamInfo + if err := pb.Unmarshal(streamInfo, &stream); err != nil { + return nil, pb.StreamMeta{}, err + } + + return &stream, streamMeta, nil +} diff --git a/vendor/storj.io/uplink/private/stream/common.go b/vendor/storj.io/uplink/private/stream/common.go new file mode 100644 index 000000000..6fad5b54d --- /dev/null +++ b/vendor/storj.io/uplink/private/stream/common.go @@ -0,0 +1,11 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package stream + +import ( + "github.com/zeebo/errs" +) + +// Error is the errs class of stream errors +var Error = errs.Class("stream error") diff --git a/vendor/storj.io/uplink/private/stream/download.go b/vendor/storj.io/uplink/private/stream/download.go new file mode 100644 index 000000000..f1d230fca --- /dev/null +++ b/vendor/storj.io/uplink/private/stream/download.go @@ -0,0 +1,118 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package stream + +import ( + "context" + "io" + + "storj.io/common/storj" + "storj.io/uplink/private/metainfo/kvmetainfo" + "storj.io/uplink/private/storage/streams" +) + +// Download implements Reader, Seeker and Closer for reading from stream. +type Download struct { + ctx context.Context + stream kvmetainfo.ReadOnlyStream + streams streams.Store + reader io.ReadCloser + offset int64 + limit int64 + closed bool +} + +// NewDownload creates new stream download. +func NewDownload(ctx context.Context, stream kvmetainfo.ReadOnlyStream, streams streams.Store) *Download { + return &Download{ + ctx: ctx, + stream: stream, + streams: streams, + limit: -1, + } +} + +// NewDownloadRange creates new stream range download with range from offset to offset+limit. +func NewDownloadRange(ctx context.Context, stream kvmetainfo.ReadOnlyStream, streams streams.Store, offset, limit int64) *Download { + return &Download{ + ctx: ctx, + stream: stream, + streams: streams, + offset: offset, + limit: limit, + } +} + +// Read reads up to len(data) bytes into data. +// +// If this is the first call it will read from the beginning of the stream. +// Use Seek to change the current offset for the next Read call. +// +// See io.Reader for more details. +func (download *Download) Read(data []byte) (n int, err error) { + if download.closed { + return 0, Error.New("already closed") + } + + if download.reader == nil { + err = download.resetReader(download.offset) + if err != nil { + return 0, err + } + } + + if download.limit == 0 { + return 0, io.EOF + } + if download.limit > 0 && download.limit < int64(len(data)) { + data = data[:download.limit] + } + n, err = download.reader.Read(data) + if download.limit >= 0 { + download.limit -= int64(n) + } + download.offset += int64(n) + + return n, err +} + +// Close closes the stream and releases the underlying resources. 
+func (download *Download) Close() error { + if download.closed { + return Error.New("already closed") + } + + download.closed = true + + if download.reader == nil { + return nil + } + + return download.reader.Close() +} + +func (download *Download) resetReader(offset int64) error { + if download.reader != nil { + err := download.reader.Close() + if err != nil { + return err + } + } + + obj := download.stream.Info() + + rr, err := download.streams.Get(download.ctx, storj.JoinPaths(obj.Bucket.Name, obj.Path), obj) + if err != nil { + return err + } + + download.reader, err = rr.Range(download.ctx, offset, obj.Size-offset) + if err != nil { + return err + } + + download.offset = offset + + return nil +} diff --git a/vendor/storj.io/uplink/private/stream/upload.go b/vendor/storj.io/uplink/private/stream/upload.go new file mode 100644 index 000000000..6ceb06db8 --- /dev/null +++ b/vendor/storj.io/uplink/private/stream/upload.go @@ -0,0 +1,95 @@ +// Copyright (C) 2019 Storj Labs, Inc. +// See LICENSE for copying information. + +package stream + +import ( + "context" + "io" + "sync" + + "github.com/zeebo/errs" + "golang.org/x/sync/errgroup" + + "storj.io/common/storj" + "storj.io/uplink/private/metainfo/kvmetainfo" + "storj.io/uplink/private/storage/streams" +) + +// Upload implements Writer and Closer for writing to stream. +type Upload struct { + ctx context.Context + stream kvmetainfo.MutableStream + streams streams.Store + writer io.WriteCloser + closed bool + errgroup errgroup.Group + + // mu protects meta + mu sync.RWMutex + meta *streams.Meta +} + +// NewUpload creates new stream upload. 
+func NewUpload(ctx context.Context, stream kvmetainfo.MutableStream, streams streams.Store) *Upload { + reader, writer := io.Pipe() + + upload := Upload{ + ctx: ctx, + stream: stream, + streams: streams, + writer: writer, + } + + upload.errgroup.Go(func() error { + m, err := streams.Put(ctx, storj.JoinPaths(stream.BucketName(), stream.Path()), reader, stream, stream.Expires()) + if err != nil { + return errs.Combine(err, reader.CloseWithError(err)) + } + + upload.mu.Lock() + upload.meta = &m + upload.mu.Unlock() + + return nil + }) + + return &upload +} + +// Write writes len(data) bytes from data to the underlying data stream. +// +// See io.Writer for more details. +func (upload *Upload) Write(data []byte) (n int, err error) { + if upload.closed { + return 0, Error.New("already closed") + } + + return upload.writer.Write(data) +} + +// Close closes the stream and releases the underlying resources. +func (upload *Upload) Close() error { + if upload.closed { + return Error.New("already closed") + } + + upload.closed = true + + err := upload.writer.Close() + + // Wait for streams.Put to commit the upload to the PointerDB + return errs.Combine(err, upload.errgroup.Wait()) +} + +// Meta returns the metadata of the uploaded object. +// +// Will return nil if the upload is still in progress. +func (upload *Upload) Meta() *streams.Meta { + upload.mu.RLock() + defer upload.mu.RUnlock() + + // we can safely return the pointer because it doesn't change after the + // upload finishes + return upload.meta +} diff --git a/vendor/storj.io/uplink/project.go b/vendor/storj.io/uplink/project.go new file mode 100644 index 000000000..2e56ef897 --- /dev/null +++ b/vendor/storj.io/uplink/project.go @@ -0,0 +1,152 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. 
+ +package uplink + +import ( + "context" + + "github.com/zeebo/errs" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + "storj.io/common/encryption" + "storj.io/common/memory" + "storj.io/common/rpc" + "storj.io/common/storj" + "storj.io/uplink/internal/telemetryclient" + "storj.io/uplink/private/ecclient" + "storj.io/uplink/private/metainfo" + "storj.io/uplink/private/metainfo/kvmetainfo" + "storj.io/uplink/private/storage/segments" + "storj.io/uplink/private/storage/streams" +) + +// maxSegmentSize can be used to override max segment size with ldflags build parameter. +// Example: go build -ldflags "-X 'storj.io/uplink.maxSegmentSize=1MiB'" storj.io/storj/cmd/uplink +var maxSegmentSize string + +// Project provides access to managing buckets and objects. +type Project struct { + config Config + access *Access + dialer rpc.Dialer + metainfo *metainfo.Client + project *kvmetainfo.Project + db *kvmetainfo.DB + streams streams.Store + + eg *errgroup.Group + telemetry telemetryclient.Client +} + +// OpenProject opens a project with the specific access grant. +func OpenProject(ctx context.Context, access *Access) (*Project, error) { + return (Config{}).OpenProject(ctx, access) +} + +// OpenProject opens a project with the specific access grant. 
+func (config Config) OpenProject(ctx context.Context, access *Access) (project *Project, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + if access == nil { + return nil, packageError.New("access grant is nil") + } + + var telemetry telemetryclient.Client + if ctor, ok := telemetryclient.ConstructorFrom(ctx); ok { + telemetry, err = ctor(zap.L(), access.satelliteAddress) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + telemetry.Stop() + } + }() + } + + metainfo, dialer, _, err := config.dial(ctx, access.satelliteAddress, access.apiKey) + if err != nil { + return nil, packageError.Wrap(err) + } + + proj, err := kvmetainfo.SetupProject(metainfo) + if err != nil { + return nil, packageError.Wrap(err) + } + + // TODO: All these should be controlled by the satellite and not configured by the uplink. + // For now we need to have these hard coded values that match the satellite configuration + // to be able to create the underlying ecclient, segement store and stream store. + var ( + segmentsSize = 64 * memory.MiB.Int64() + maxInlineSize = 4 * memory.KiB.Int() + ) + + if maxSegmentSize != "" { + segmentsSize, err = memory.ParseString(maxSegmentSize) + if err != nil { + return nil, packageError.Wrap(err) + } + } + + // TODO: This should come from the EncryptionAccess. For now it's hardcoded to twice the + // stripe size of the default redundancy scheme on the satellite. + encBlockSize := 29 * 256 * memory.B.Int32() + + // TODO: What is the correct way to derive a named zap.Logger from config.Log? + ec := ecclient.NewClient(zap.L().Named("ecclient"), dialer, 0) + segmentStore := segments.NewSegmentStore(metainfo, ec) + + encryptionParameters := storj.EncryptionParameters{ + // TODO: the cipher should be provided by the Access, but we don't store it there yet. 
+ CipherSuite: storj.EncAESGCM, + BlockSize: encBlockSize, + } + + maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(segmentsSize, encryptionParameters) + if err != nil { + return nil, packageError.Wrap(err) + } + + streamStore, err := streams.NewStreamStore(metainfo, segmentStore, segmentsSize, access.encAccess.Store(), int(encryptionParameters.BlockSize), encryptionParameters.CipherSuite, maxInlineSize, maxEncryptedSegmentSize) + if err != nil { + return nil, packageError.Wrap(err) + } + + db := kvmetainfo.New(proj, metainfo, streamStore, segmentStore, access.encAccess.Store()) + + var eg errgroup.Group + if telemetry != nil { + eg.Go(func() error { + telemetry.Run(ctx) + return nil + }) + } + + return &Project{ + config: config, + access: access, + dialer: dialer, + metainfo: metainfo, + project: proj, + db: db, + streams: streamStore, + eg: &eg, + telemetry: telemetry, + }, nil +} + +// Close closes the project and all associated resources. +func (project *Project) Close() (err error) { + if project.telemetry != nil { + project.telemetry.Stop() + err = errs.Combine( + project.eg.Wait(), + project.telemetry.Report(context.Background()), + ) + } + return packageError.Wrap(errs.Combine(err, project.metainfo.Close())) +} diff --git a/vendor/storj.io/uplink/upload.go b/vendor/storj.io/uplink/upload.go new file mode 100644 index 000000000..8e818ad08 --- /dev/null +++ b/vendor/storj.io/uplink/upload.go @@ -0,0 +1,166 @@ +// Copyright (C) 2020 Storj Labs, Inc. +// See LICENSE for copying information. + +package uplink + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/zeebo/errs" + + "storj.io/common/pb" + "storj.io/common/storj" + "storj.io/uplink/private/metainfo/kvmetainfo" + "storj.io/uplink/private/stream" +) + +// ErrUploadDone is returned when either Abort or Commit has already been called. +var ErrUploadDone = errors.New("upload done") + +// UploadOptions contains additional options for uploading. 
+type UploadOptions struct { + // When Expires is zero, there is no expiration. + Expires time.Time +} + +// UploadObject starts an upload to the specific key. +func (project *Project) UploadObject(ctx context.Context, bucket, key string, options *UploadOptions) (upload *Upload, err error) { + defer mon.Func().ResetTrace(&ctx)(&err) + + if bucket == "" { + return nil, errwrapf("%w (%q)", ErrBucketNameInvalid, bucket) + } + if key == "" { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } + + if options == nil { + options = &UploadOptions{} + } + + b := storj.Bucket{Name: bucket} + obj, err := project.db.CreateObject(ctx, b, key, nil) + if err != nil { + if storj.ErrNoPath.Has(err) { + return nil, errwrapf("%w (%q)", ErrObjectKeyInvalid, key) + } + return nil, convertKnownErrors(err, bucket) + } + + info := obj.Info() + mutableStream, err := obj.CreateStream(ctx) + if err != nil { + return nil, packageError.Wrap(err) + } + + ctx, cancel := context.WithCancel(ctx) + + upload = &Upload{ + cancel: cancel, + bucket: bucket, + object: convertObject(&info), + } + upload.upload = stream.NewUpload(ctx, dynamicMetadata{ + MutableStream: mutableStream, + object: upload.object, + expires: options.Expires, + }, project.streams) + return upload, nil +} + +// Upload is an upload to Storj Network. +type Upload struct { + aborted int32 + cancel context.CancelFunc + upload *stream.Upload + bucket string + object *Object +} + +// Info returns the last information about the uploaded object. +func (upload *Upload) Info() *Object { + meta := upload.upload.Meta() + if meta != nil { + upload.object.System.ContentLength = meta.Size + upload.object.System.Created = meta.Modified + } + return upload.object +} + +// Write uploads len(p) bytes from p to the object's data stream. +// It returns the number of bytes written from p (0 <= n <= len(p)) +// and any error encountered that caused the write to stop early. 
+func (upload *Upload) Write(p []byte) (n int, err error) { + return upload.upload.Write(p) +} + +// Commit commits data to the store. +// +// Returns ErrUploadDone when either Abort or Commit has already been called. +func (upload *Upload) Commit() error { + if atomic.LoadInt32(&upload.aborted) == 1 { + return errwrapf("%w: already aborted", ErrUploadDone) + } + + err := upload.upload.Close() + if err != nil && errs.Unwrap(err).Error() == "already closed" { + return errwrapf("%w: already committed", ErrUploadDone) + } + + return convertKnownErrors(err, upload.bucket) +} + +// Abort aborts the upload. +// +// Returns ErrUploadDone when either Abort or Commit has already been called. +func (upload *Upload) Abort() error { + if upload.upload.Meta() != nil { + return errwrapf("%w: already committed", ErrUploadDone) + } + + if !atomic.CompareAndSwapInt32(&upload.aborted, 0, 1) { + return errwrapf("%w: already aborted", ErrUploadDone) + } + + upload.cancel() + return nil +} + +// SetCustomMetadata updates custom metadata to be included with the object. +// If it is nil, it won't be modified. +func (upload *Upload) SetCustomMetadata(ctx context.Context, custom CustomMetadata) error { + if atomic.LoadInt32(&upload.aborted) == 1 { + return errwrapf("%w: upload aborted", ErrUploadDone) + } + if upload.upload.Meta() != nil { + return errwrapf("%w: already committed", ErrUploadDone) + } + + if custom != nil { + if err := custom.Verify(); err != nil { + return packageError.Wrap(err) + } + upload.object.Custom = custom.Clone() + } + + return nil +} + +type dynamicMetadata struct { + kvmetainfo.MutableStream + object *Object + expires time.Time +} + +func (meta dynamicMetadata) Metadata() ([]byte, error) { + return pb.Marshal(&pb.SerializableMeta{ + UserDefined: meta.object.Custom.Clone(), + }) +} + +func (meta dynamicMetadata) Expires() time.Time { + return meta.expires +}