From fdf750e4d4273620e774d03db087ab0dd4eef8c5 Mon Sep 17 00:00:00 2001
From: 6543 <6543@obermui.de>
Date: Sun, 10 May 2020 07:40:54 +0200
Subject: [PATCH] [Vendor] blevesearch v0.8.1 -> v1.0.7 (#11360)

* Update blevesearch v0.8.1 -> v1.0.7

* make vendor

Co-authored-by: zeripath
---
 go.mod | 17 +-
 go.sum | 58 +-
 .../RoaringBitmap/roaring/.travis.yml | 1 +
 .../RoaringBitmap/roaring/README.md | 10 +-
 .../RoaringBitmap/roaring/arraycontainer.go | 12 +
 .../RoaringBitmap/roaring/bitmapcontainer.go | 12 +
 .../RoaringBitmap/roaring/roaring.go | 85 +-
 .../RoaringBitmap/roaring/roaringarray.go | 17 +-
 .../RoaringBitmap/roaring/runcontainer.go | 12 +
 .../RoaringBitmap/roaring/setutil.go | 1 +
 .../github.com/RoaringBitmap/roaring/util.go | 2 +-
 .../github.com/blevesearch/bleve/.travis.yml | 10 +-
 .../analysis/lang/en/stemmer_en_snowball.go | 49 +
 .../github.com/blevesearch/bleve/geo/geo.go | 8 +-
 .../github.com/blevesearch/bleve/geo/parse.go | 8 +-
 vendor/github.com/blevesearch/bleve/go.mod | 25 +
 .../bleve/index/scorch/introducer.go | 95 +-
 .../blevesearch/bleve/index/scorch/merge.go | 85 +-
 .../bleve/index/scorch/optimize.go | 74 +-
 .../bleve/index/scorch/persister.go | 69 +-
 .../blevesearch/bleve/index/scorch/scorch.go | 78 +-
 .../bleve/index/scorch/segment/empty.go | 8 +-
 .../bleve/index/scorch/segment/plugin.go | 58 +
 .../bleve/index/scorch/segment/segment.go | 16 +
 .../bleve/index/scorch/segment/unadorned.go | 148 ++
 .../bleve/index/scorch/segment_plugin.go | 77 +
 .../bleve/index/scorch/snapshot_rollback.go | 138 +-
 .../blevesearch/bleve/index/scorch/stats.go | 10 +-
 .../bleve/index/store/boltdb/iterator.go | 2 +-
 .../bleve/index/store/boltdb/reader.go | 2 +-
 .../bleve/index/store/boltdb/store.go | 2 +-
 .../bleve/index/upsidedown/upsidedown.go | 3 +-
 .../bleve/search/collector/topn.go | 13 +
 .../highlight/fragmenter/simple/simple.go | 5 +
 .../search/searcher/search_geoboundingbox.go | 15 +-
 .../searcher/search_geopointdistance.go | 17 +-
 .../search/searcher/search_geopolygon.go | 50 +-
 .../blevesearch/go-porterstemmer/go.mod | 3 +
 .../mmap-go/.gitignore | 2 +
 .../blevesearch/mmap-go/.travis.yml | 16 +
 .../{edsrzf => blevesearch}/mmap-go/LICENSE | 0
 .../{edsrzf => blevesearch}/mmap-go/README.md | 0
 vendor/github.com/blevesearch/mmap-go/go.mod | 3 +
 vendor/github.com/blevesearch/mmap-go/go.sum | 2 +
 .../{edsrzf => blevesearch}/mmap-go/mmap.go | 0
 .../mmap-go/mmap_unix.go | 0
 .../mmap-go/mmap_windows.go | 22 +-
 vendor/github.com/blevesearch/segment/go.mod | 3 +
 .../blevesearch/snowballstem/COPYING | 29 +
 .../blevesearch/snowballstem/README.md | 66 +
 .../blevesearch/snowballstem/among.go | 16 +
 .../snowballstem/english/english_stemmer.go | 1341 +++++++++++++++++
 .../blevesearch/snowballstem/env.go | 389 +++++
 .../blevesearch/snowballstem/gen.go | 61 +
 .../blevesearch/snowballstem/go.mod | 3 +
 .../blevesearch/snowballstem/util.go | 34 +
 .../github.com/blevesearch/zap/v11/.gitignore | 12 +
 vendor/github.com/blevesearch/zap/v11/LICENSE | 202 +++
 .../scorch/segment/zap => zap/v11}/README.md | 0
 .../scorch/segment/zap => zap/v11}/build.go | 7 +-
 .../segment/zap => zap/v11}/contentcoder.go | 0
 .../scorch/segment/zap => zap/v11}/count.go | 0
 .../scorch/segment/zap => zap/v11}/dict.go | 0
 .../segment/zap => zap/v11}/docvalues.go | 4 -
 .../segment/zap => zap/v11}/enumerator.go | 0
 vendor/github.com/blevesearch/zap/v11/go.mod | 12 +
 .../segment/zap => zap/v11}/intcoder.go | 0
 .../scorch/segment/zap => zap/v11}/merge.go | 32 +-
 .../scorch/segment/zap => zap/v11}/new.go | 12 +-
 .../github.com/blevesearch/zap/v11/plugin.go | 37 +
 .../scorch/segment/zap => zap/v11}/posting.go | 21 +-
 .../scorch/segment/zap => zap/v11}/read.go | 0
 .../scorch/segment/zap => zap/v11}/segment.go | 6 +-
 .../scorch/segment/zap => zap/v11}/write.go | 0
 .../scorch/segment/zap => zap/v11}/zap.md | 0
 .../github.com/blevesearch/zap/v12/.gitignore | 12 +
 vendor/github.com/blevesearch/zap/v12/LICENSE | 202 +++
 .../github.com/blevesearch/zap/v12/README.md | 158 ++
 .../github.com/blevesearch/zap/v12/build.go | 156 ++
 .../github.com/blevesearch/zap/v12/chunk.go | 54 +
 .../blevesearch/zap/v12/contentcoder.go | 243 +++
 .../github.com/blevesearch/zap/v12/count.go | 61 +
 vendor/github.com/blevesearch/zap/v12/dict.go | 263 ++++
 .../blevesearch/zap/v12/docvalues.go | 312 ++++
 .../blevesearch/zap/v12/enumerator.go | 138 ++
 vendor/github.com/blevesearch/zap/v12/go.mod | 12 +
 .../blevesearch/zap/v12/intDecoder.go | 111 ++
 .../blevesearch/zap/v12/intcoder.go | 203 +++
 .../github.com/blevesearch/zap/v12/merge.go | 847 +++++++++++
 vendor/github.com/blevesearch/zap/v12/new.go | 860 +++++++++++
 .../github.com/blevesearch/zap/v12/plugin.go | 37 +
 .../github.com/blevesearch/zap/v12/posting.go | 798 ++++++++++
 vendor/github.com/blevesearch/zap/v12/read.go | 43 +
 .../github.com/blevesearch/zap/v12/segment.go | 572 +++++++
 .../github.com/blevesearch/zap/v12/write.go | 145 ++
 vendor/github.com/blevesearch/zap/v12/zap.md | 177 +++
 vendor/github.com/couchbase/vellum/go.mod | 2 +-
 vendor/github.com/couchbase/vellum/go.sum | 5 +-
 .../couchbase/vellum/vellum_mmap.go | 2 +-
 vendor/github.com/etcd-io/bbolt/bolt_arm.go | 28 -
 .../golang/protobuf/proto/buffer.go | 14 +-
 .../golang/protobuf/proto/extensions.go | 4 +-
 .../golang/protobuf/proto/registry.go | 10 +-
 .../golang/protobuf/proto/text_encode.go | 8 +-
 vendor/github.com/mschoch/smat/go.mod | 3 +
 vendor/github.com/steveyen/gtreap/go.mod | 3 +
 vendor/github.com/tinylib/msgp/msgp/json.go | 62 +-
 .../etcd-io => go.etcd.io}/bbolt/.gitignore | 0
 .../etcd-io => go.etcd.io}/bbolt/.travis.yml | 2 +-
 .../etcd-io => go.etcd.io}/bbolt/LICENSE | 0
 .../etcd-io => go.etcd.io}/bbolt/Makefile | 0
 .../etcd-io => go.etcd.io}/bbolt/README.md | 4 +-
 .../etcd-io => go.etcd.io}/bbolt/bolt_386.go | 3 -
 .../bbolt/bolt_amd64.go | 3 -
 vendor/go.etcd.io/bbolt/bolt_arm.go | 7 +
 .../bbolt/bolt_arm64.go | 3 -
 .../bbolt/bolt_linux.go | 0
 .../bbolt/bolt_mips64x.go | 3 -
 .../bbolt/bolt_mipsx.go | 3 -
 .../bbolt/bolt_openbsd.go | 0
 .../etcd-io => go.etcd.io}/bbolt/bolt_ppc.go | 3 -
 .../bbolt/bolt_ppc64.go | 3 -
 .../bbolt/bolt_ppc64le.go | 3 -
 .../bbolt/bolt_riscv64.go | 3 -
 .../bbolt/bolt_s390x.go | 3 -
 .../etcd-io => go.etcd.io}/bbolt/bolt_unix.go | 2 +-
 vendor/go.etcd.io/bbolt/bolt_unix_aix.go | 90 ++
 .../bbolt/bolt_unix_solaris.go | 0
 .../bbolt/bolt_windows.go | 0
 .../bbolt/boltsync_unix.go | 0
 .../etcd-io => go.etcd.io}/bbolt/bucket.go | 34 +-
 .../etcd-io => go.etcd.io}/bbolt/cursor.go | 2 +-
 .../etcd-io => go.etcd.io}/bbolt/db.go | 4 +-
 .../etcd-io => go.etcd.io}/bbolt/doc.go | 0
 .../etcd-io => go.etcd.io}/bbolt/errors.go | 0
 .../etcd-io => go.etcd.io}/bbolt/freelist.go | 37 +-
 .../bbolt/freelist_hmap.go | 2 +-
 vendor/go.etcd.io/bbolt/go.mod | 5 +
 vendor/go.etcd.io/bbolt/go.sum | 2 +
 .../etcd-io => go.etcd.io}/bbolt/node.go | 61 +-
 .../etcd-io => go.etcd.io}/bbolt/page.go | 56 +-
 .../etcd-io => go.etcd.io}/bbolt/tx.go | 17 +-
 .../sys/internal/unsafeheader/unsafeheader.go | 30 +
 vendor/golang.org/x/sys/unix/README.md | 4 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 8 +-
 .../x/sys/unix/syscall_darwin.1_13.go | 21 +-
 .../golang.org/x/sys/unix/syscall_darwin.go | 1 +
 .../x/sys/unix/syscall_darwin_386.go | 11 -
 .../x/sys/unix/syscall_darwin_amd64.go | 11 -
 .../x/sys/unix/syscall_darwin_arm.go | 11 -
 .../x/sys/unix/syscall_darwin_arm64.go | 11 -
 vendor/golang.org/x/sys/unix/syscall_linux.go | 13 +-
 .../x/sys/unix/syscall_linux_386.go | 2 +-
 .../x/sys/unix/syscall_linux_amd64.go | 2 +-
 .../x/sys/unix/syscall_linux_arm.go | 2 +-
 .../x/sys/unix/syscall_linux_arm64.go | 28 +-
 .../x/sys/unix/syscall_linux_mips64x.go | 2 +-
 .../x/sys/unix/syscall_linux_mipsx.go | 2 +-
 .../x/sys/unix/syscall_linux_ppc64x.go | 2 +-
 .../x/sys/unix/syscall_linux_riscv64.go | 8 +-
 .../x/sys/unix/syscall_linux_s390x.go | 2 +-
 .../x/sys/unix/syscall_linux_sparc64.go | 2 +-
 vendor/golang.org/x/sys/unix/syscall_unix.go | 17 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 27 +-
 .../x/sys/unix/zerrors_linux_386.go | 4 +
 .../x/sys/unix/zerrors_linux_amd64.go | 4 +
 .../x/sys/unix/zerrors_linux_arm.go | 4 +
 .../x/sys/unix/zerrors_linux_arm64.go | 4 +
 .../x/sys/unix/zerrors_linux_mips.go | 4 +
 .../x/sys/unix/zerrors_linux_mips64.go | 4 +
 .../x/sys/unix/zerrors_linux_mips64le.go | 4 +
 .../x/sys/unix/zerrors_linux_mipsle.go | 4 +
 .../x/sys/unix/zerrors_linux_ppc64.go | 4 +
 .../x/sys/unix/zerrors_linux_ppc64le.go | 4 +
 .../x/sys/unix/zerrors_linux_riscv64.go | 4 +
 .../x/sys/unix/zerrors_linux_s390x.go | 4 +
 .../x/sys/unix/zerrors_linux_sparc64.go | 4 +
 .../x/sys/unix/zsyscall_darwin_386.1_11.go | 22 +-
 .../x/sys/unix/zsyscall_darwin_386.go | 32 +-
 .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 22 +-
 .../x/sys/unix/zsyscall_darwin_amd64.go | 32 +-
 .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 22 +-
 .../x/sys/unix/zsyscall_darwin_arm.go | 32 +-
 .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 22 +-
 .../x/sys/unix/zsyscall_darwin_arm64.go | 32 +-
 .../golang.org/x/sys/unix/zsyscall_linux.go | 31 +
 .../x/sys/unix/zsyscall_linux_386.go | 2 +-
 .../x/sys/unix/zsyscall_linux_amd64.go | 2 +-
 .../x/sys/unix/zsyscall_linux_arm.go | 2 +-
 .../x/sys/unix/zsyscall_linux_arm64.go | 4 +-
 .../x/sys/unix/zsyscall_linux_mips.go | 2 +-
 .../x/sys/unix/zsyscall_linux_mips64.go | 2 +-
 .../x/sys/unix/zsyscall_linux_mips64le.go | 2 +-
 .../x/sys/unix/zsyscall_linux_mipsle.go | 2 +-
 .../x/sys/unix/zsyscall_linux_ppc64.go | 2 +-
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 2 +-
 .../x/sys/unix/zsyscall_linux_s390x.go | 2 +-
 .../x/sys/unix/zsyscall_linux_sparc64.go | 2 +-
 .../x/sys/unix/zsysnum_linux_386.go | 2 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 2 +
 .../x/sys/unix/zsysnum_linux_arm.go | 2 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 2 +
 .../x/sys/unix/zsysnum_linux_mips.go | 2 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 2 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 2 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 2 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 2 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 2 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 2 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 2 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 56 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 +
 .../x/sys/unix/ztypes_linux_amd64.go | 1 +
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 +
 .../x/sys/unix/ztypes_linux_arm64.go | 1 +
 .../x/sys/unix/ztypes_linux_mips.go | 1 +
 .../x/sys/unix/ztypes_linux_mips64.go | 1 +
 .../x/sys/unix/ztypes_linux_mips64le.go | 1 +
 .../x/sys/unix/ztypes_linux_mipsle.go | 1 +
 .../x/sys/unix/ztypes_linux_ppc64.go | 1 +
 .../x/sys/unix/ztypes_linux_ppc64le.go | 1 +
 .../x/sys/unix/ztypes_linux_riscv64.go | 1 +
 .../x/sys/unix/ztypes_linux_s390x.go | 1 +
 .../x/sys/unix/ztypes_linux_sparc64.go | 1 +
 .../golang.org/x/sys/windows/dll_windows.go | 29 +
 .../protobuf/encoding/prototext/encode.go | 6 +
 .../protobuf/internal/descfmt/stringer.go | 2 +-
 .../internal/fieldnum/descriptor_gen.go | 21 +-
 .../protobuf/internal/fieldsort/fieldsort.go | 4 +-
 .../protobuf/internal/filedesc/desc.go | 67 +-
 .../protobuf/internal/filedesc/desc_lazy.go | 9 +
 .../protobuf/internal/impl/codec_gen.go | 40 +
 .../protobuf/internal/impl/codec_message.go | 31 +-
 .../protobuf/internal/impl/codec_tables.go | 3 +
 .../protobuf/internal/impl/convert.go | 22 +-
 .../protobuf/internal/impl/convert_list.go | 10 +-
 .../protobuf/internal/impl/convert_map.go | 6 +-
 .../protobuf/internal/impl/legacy_export.go | 12 -
 .../internal/impl/legacy_extension.go | 2 +
 .../protobuf/internal/impl/message.go | 3 +-
 .../protobuf/internal/impl/message_reflect.go | 25 +-
 .../internal/impl/message_reflect_field.go | 35 +-
 .../protobuf/internal/impl/validate.go | 2 +-
 .../protobuf/internal/impl/weak.go | 64 +-
 .../protobuf/internal/version/version.go | 2 +-
 .../protobuf/proto/checkinit.go | 6 +
 .../protobuf/proto/encode.go | 35 +
 .../protobuf/proto/extension.go | 82 +-
 .../google.golang.org/protobuf/proto/merge.go | 3 +
 .../google.golang.org/protobuf/proto/size.go | 5 +
 .../protobuf/reflect/protoreflect/proto.go | 5 +-
 .../protobuf/reflect/protoreflect/type.go | 13 +
 .../protobuf/reflect/protoreflect/value.go | 4 +-
 vendor/modules.txt | 48 +-
 255 files changed, 9786 insertions(+), 974 deletions(-)

 create mode 100644 vendor/github.com/blevesearch/bleve/analysis/lang/en/stemmer_en_snowball.go
 create mode 100644 vendor/github.com/blevesearch/bleve/go.mod
 create mode 100644 vendor/github.com/blevesearch/bleve/index/scorch/segment/plugin.go
 create mode 100644 vendor/github.com/blevesearch/bleve/index/scorch/segment/unadorned.go
 create mode 100644 vendor/github.com/blevesearch/bleve/index/scorch/segment_plugin.go
 create mode 100644 vendor/github.com/blevesearch/go-porterstemmer/go.mod
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/.gitignore (78%)
 create mode 100644 vendor/github.com/blevesearch/mmap-go/.travis.yml
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/LICENSE (100%)
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/README.md (100%)
 create mode 100644 vendor/github.com/blevesearch/mmap-go/go.mod
 create mode 100644 vendor/github.com/blevesearch/mmap-go/go.sum
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/mmap.go (100%)
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/mmap_unix.go (100%)
 rename vendor/github.com/{edsrzf => blevesearch}/mmap-go/mmap_windows.go (91%)
 create mode 100644 vendor/github.com/blevesearch/segment/go.mod
 create mode 100644 vendor/github.com/blevesearch/snowballstem/COPYING
 create mode 100644 vendor/github.com/blevesearch/snowballstem/README.md
 create mode 100644 vendor/github.com/blevesearch/snowballstem/among.go
 create mode 100644 vendor/github.com/blevesearch/snowballstem/english/english_stemmer.go
 create mode 100644 vendor/github.com/blevesearch/snowballstem/env.go
 create mode 100644 vendor/github.com/blevesearch/snowballstem/gen.go
 create mode 100644 vendor/github.com/blevesearch/snowballstem/go.mod
 create mode 100644 vendor/github.com/blevesearch/snowballstem/util.go
 create mode 100644 vendor/github.com/blevesearch/zap/v11/.gitignore
 create mode 100644 vendor/github.com/blevesearch/zap/v11/LICENSE
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/README.md (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/build.go (97%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/contentcoder.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/count.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/dict.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/docvalues.go (99%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/enumerator.go (100%)
 create mode 100644 vendor/github.com/blevesearch/zap/v11/go.mod
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/intcoder.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/merge.go (96%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/new.go (98%)
 create mode 100644 vendor/github.com/blevesearch/zap/v11/plugin.go
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/posting.go (97%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/read.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/segment.go (99%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/write.go (100%)
 rename vendor/github.com/blevesearch/{bleve/index/scorch/segment/zap => zap/v11}/zap.md (100%)
 create mode 100644 vendor/github.com/blevesearch/zap/v12/.gitignore
 create mode 100644 vendor/github.com/blevesearch/zap/v12/LICENSE
 create mode 100644 vendor/github.com/blevesearch/zap/v12/README.md
 create mode 100644 vendor/github.com/blevesearch/zap/v12/build.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/chunk.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/contentcoder.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/count.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/dict.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/docvalues.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/enumerator.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/go.mod
 create mode 100644 vendor/github.com/blevesearch/zap/v12/intDecoder.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/intcoder.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/merge.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/new.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/plugin.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/posting.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/read.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/segment.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/write.go
 create mode 100644 vendor/github.com/blevesearch/zap/v12/zap.md
 delete mode 100644 vendor/github.com/etcd-io/bbolt/bolt_arm.go
 create mode 100644 vendor/github.com/mschoch/smat/go.mod
 create mode 100644 vendor/github.com/steveyen/gtreap/go.mod
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/.gitignore (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/.travis.yml (96%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/LICENSE (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/Makefile (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/README.md (98%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_386.go (72%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_amd64.go (73%)
 create mode 100644 vendor/go.etcd.io/bbolt/bolt_arm.go
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_arm64.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_linux.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_mips64x.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_mipsx.go (74%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_openbsd.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_ppc.go (74%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_ppc64.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_ppc64le.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_riscv64.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_s390x.go (75%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_unix.go (98%)
 create mode 100644 vendor/go.etcd.io/bbolt/bolt_unix_aix.go
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_unix_solaris.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bolt_windows.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/boltsync_unix.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/bucket.go (95%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/cursor.go (99%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/db.go (99%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/doc.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/errors.go (100%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/freelist.go (90%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/freelist_hmap.go (99%)
 create mode 100644 vendor/go.etcd.io/bbolt/go.mod
 create mode 100644 vendor/go.etcd.io/bbolt/go.sum
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/node.go (91%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/page.go (71%)
 rename vendor/{github.com/etcd-io => go.etcd.io}/bbolt/tx.go (98%)
 create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go

diff --git a/go.mod b/go.mod
index d0406bc17e..657ed9c034 100644
--- a/go.mod
+++ b/go.mod
@@ -19,15 +19,11 @@ require (
 	gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
 	github.com/BurntSushi/toml v0.3.1
 	github.com/PuerkitoBio/goquery v1.5.0
-	github.com/RoaringBitmap/roaring v0.4.21 // indirect
+	github.com/RoaringBitmap/roaring v0.4.23 // indirect
 	github.com/bgentry/speakeasy v0.1.0 // indirect
-	github.com/blevesearch/bleve v0.8.1
-	github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect
-	github.com/blevesearch/go-porterstemmer v1.0.2 // indirect
-	github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect
+	github.com/blevesearch/bleve v1.0.7
 	github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
 	github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 // indirect
-	github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect
 	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
 	github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect
@@ -36,7 +32,6 @@ require (
 	github.com/dustin/go-humanize v1.0.0
 	github.com/editorconfig/editorconfig-core-go/v2 v2.1.1
 	github.com/emirpasic/gods v1.12.0
-	github.com/etcd-io/bbolt v1.3.3 // indirect
 	github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a
 	github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
 	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
@@ -53,7 +48,7 @@ require (
 	github.com/gobwas/glob v0.2.3
 	github.com/gogs/chardet v0.0.0-20191104214054-4b6791f73a28
 	github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14
-	github.com/golang/protobuf v1.4.0 // indirect
+	github.com/golang/protobuf v1.4.1 // indirect
 	github.com/google/go-github/v24 v24.0.1
 	github.com/gorilla/context v1.1.1
 	github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
@@ -95,10 +90,9 @@ require (
 	github.com/sergi/go-diff v1.1.0
 	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
 	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
-	github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
 	github.com/stretchr/testify v1.4.0
 	github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect
-	github.com/tinylib/msgp v1.1.1 // indirect
+	github.com/tinylib/msgp v1.1.2 // indirect
 	github.com/tstranex/u2f v1.0.0
 	github.com/unknwon/cae v1.0.0
 	github.com/unknwon/com v1.0.1
@@ -109,11 +103,10 @@ require (
 	github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
 	github.com/yuin/goldmark v1.1.25
 	github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
-	go.etcd.io/bbolt v1.3.3 // indirect
 	golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073
 	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
 	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
-	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
 	golang.org/x/text v0.3.2
 	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
 	golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224
diff --git a/go.sum b/go.sum
index b80584360b..25c37d1e07 100644
--- a/go.sum
+++ b/go.sum
@@ -56,6 +56,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8=
 github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
+github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo=
+github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/Unknwon/com v0.0.0-20190321035513-0fed4efef755/go.mod h1:voKvFVpXBJxdIPeqjoJuLK+UVcRlo/JLjeToGxPYu68=
@@ -83,14 +85,22 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blevesearch/bleve v0.8.1 h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU=
-github.com/blevesearch/bleve v0.8.1/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw=
-github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA=
-github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
-github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I=
-github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA=
-github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg=
-github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8=
+github.com/blevesearch/bleve v1.0.7 h1:4PspZE7XABMSKcVpzAKp0E05Yer1PIYmTWk+1ngNr/c=
+github.com/blevesearch/bleve v1.0.7/go.mod h1:3xvmBtaw12Y4C9iA1RTzwWCof5j5HjydjCTiDE2TeE0=
+github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ=
+github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
+github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
+github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
+github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0=
+github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
+github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac=
+github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
+github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
+github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
+github.com/blevesearch/zap/v11 v11.0.7 h1:nnmAOP6eXBkqEa1Srq1eqA5Wmn4w+BZjLdjynNxvd+M=
+github.com/blevesearch/zap/v11 v11.0.7/go.mod h1:bJoY56fdU2m/IP4LLz/1h4jY2thBoREvoqbuJ8zhm9k=
+github.com/blevesearch/zap/v12 v12.0.7 h1:y8FWSAYkdc4p1dn4YLxNNr1dxXlSUsakJh2Fc/r6cj4=
+github.com/blevesearch/zap/v12 v12.0.7/go.mod h1:70DNK4ZN4tb42LubeDbfpp6xnm8g3ROYVvvZ6pEoXD8=
 github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 h1:NGpwhs9FOwddM6TptNrq2ycby4s24TcppSe5uG4DA/Q=
 github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
 github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
@@ -108,6 +118,7 @@ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k=
 github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM=
 github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
 github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8=
@@ -116,8 +127,9 @@ github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+
 github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
 github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk=
 github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
-github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd h1:zeuJhcG3f8eePshH3KxkNE+Xtl53pVln9MOUPMyr/1w=
-github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd/go.mod h1:xbc8Ff/oG7h2ejd7AlwOpfd+6QZntc92ygpAOfGwcKY=
+github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
+github.com/couchbase/vellum v1.0.1 h1:qrj9ohvZedvc51S5KzPfJ6P6z0Vqzv7Lx7k3mVc2WOk=
+github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4=
 github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=
 github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
@@ -153,8 +165,6 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
-github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
 github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a h1:M1bRpaZAn4GSsqu3hdK2R8H0AH9O6vqCTCbm2oAFGfE=
 github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a/go.mod h1:MkKY/CB98aVE4VxO63X5vTQKUgcn+3XP15LMASe3lYs=
 github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
@@ -292,6 +302,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
 github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
 github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -396,6 +408,7 @@ github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCW
 github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
 github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
@@ -464,6 +477,8 @@ github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c h1:3wkDRdxK92dF+c1ke
 github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
 github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
+github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
 github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc h1:z1PgdCCmYYVL0BoJTUgmAq1p7ca8fzYIPsNyfsN3xAU=
 github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -534,6 +549,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
 github.com/quasoft/websspi v1.0.0 h1:5nDgdM5xSur9s+B5w2xQ5kxf5nUGqgFgU4W0aDLZ8Mw=
 github.com/quasoft/websspi v1.0.0/go.mod h1:HmVdl939dQ0WIXZhyik+ARdI03M6bQzaSEKcgpFmewk=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ=
 github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
@@ -578,8 +594,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s=
-github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw=
+github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM=
+github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
@@ -596,8 +612,8 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
 github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
 github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
 github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
-github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y=
-github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
+github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/toqueteos/trie v1.0.0 h1:8i6pXxNUXNRAqP246iibb7w/pSFquNTQ+uNfriG7vlk=
 github.com/toqueteos/trie v1.0.0/go.mod h1:Ywk48QhEqhU1+DwhMkJ2x7eeGxDHiGkAdc9+0DYcbsM=
@@ -637,8 +653,8 @@ github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60/go.mod h1:i9Vhc
 github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
 github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
+go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs=
 go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -728,6 +744,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -746,10 +763,13 @@ golang.org/x/sys v0.0.0-20190907184412-d223b2b6db03/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
 golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
+golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
@@ -822,6 +842,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
 google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
 google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
diff --git a/vendor/github.com/RoaringBitmap/roaring/.travis.yml b/vendor/github.com/RoaringBitmap/roaring/.travis.yml
index 8839c14fd0..c17804322b 100644
--- a/vendor/github.com/RoaringBitmap/roaring/.travis.yml
+++ b/vendor/github.com/RoaringBitmap/roaring/.travis.yml
@@ -14,6 +14,7 @@ go:
 - "1.10.x"
 - "1.11.x"
 - "1.12.x"
+- "1.13.x"
 - tip
 
 # whitelist
diff --git a/vendor/github.com/RoaringBitmap/roaring/README.md b/vendor/github.com/RoaringBitmap/roaring/README.md
index b711d09ec2..94fdf057e5 100644
--- a/vendor/github.com/RoaringBitmap/roaring/README.md
+++ b/vendor/github.com/RoaringBitmap/roaring/README.md
@@ -29,11 +29,17 @@ Roaring bitmaps are found to work well in many important applications:
 
 The ``roaring`` Go library is used by
 
-* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client
-* [runv](https://github.com/hyperhq/runv): an Hypervisor-based runtime for the Open Containers Initiative
+* [Cloud Torrent](https://github.com/jpillora/cloud-torrent)
+* [runv](https://github.com/hyperhq/runv)
 * [InfluxDB](https://www.influxdata.com)
 * [Pilosa](https://www.pilosa.com/)
 * [Bleve](http://www.blevesearch.com)
+* [lindb](https://github.com/lindb/lindb)
+* [Elasticell](https://github.com/deepfabric/elasticell)
+* [SourceGraph](https://github.com/sourcegraph/sourcegraph)
+* [M3](https://github.com/m3db/m3)
+* [trident](https://github.com/NetApp/trident)
+
 This library is used in production in several systems, it is part of the
 [Awesome Go collection](https://awesome-go.com).
diff --git a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
index 621616f5df..eb124f3b76 100644
--- a/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
+++ b/vendor/github.com/RoaringBitmap/roaring/arraycontainer.go
@@ -24,6 +24,18 @@ func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uin
 	}
 }
 
+func (ac *arrayContainer) iterate(cb func(x uint16) bool) bool {
+	iterator := shortIterator{ac.content, 0}
+
+	for iterator.hasNext() {
+		if !cb(iterator.next()) {
+			return false
+		}
+	}
+
+	return true
+}
+
 func (ac *arrayContainer) getShortIterator() shortPeekable {
 	return &shortIterator{ac.content, 0}
 }
diff --git a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
index e749721bb2..cd259fd2d9 100644
--- a/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
+++ b/vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go
@@ -96,6 +96,18 @@ func (bc *bitmapContainer) maximum() uint16 {
 	return uint16(0)
 }
 
+func (bc *bitmapContainer) iterate(cb func(x uint16) bool) bool {
+	iterator := bitmapContainerShortIterator{bc, bc.NextSetBit(0)}
+
+	for iterator.hasNext() {
+		if !cb(iterator.next()) {
+			return false
+		}
+	}
+
+	return true
+}
+
 type bitmapContainerShortIterator struct {
 	ptr *bitmapContainer
 	i   int
diff --git a/vendor/github.com/RoaringBitmap/roaring/roaring.go b/vendor/github.com/RoaringBitmap/roaring/roaring.go
index df58cc30b5..ed75d58b97 100644
--- a/vendor/github.com/RoaringBitmap/roaring/roaring.go
+++ b/vendor/github.com/RoaringBitmap/roaring/roaring.go
@@ -416,6 +416,38 @@ func (rb *Bitmap) String() string {
 	return buffer.String()
 }
 
+// Iterate iterates over the bitmap, calling the given callback with each value in the bitmap. If the callback returns
+// false, the iteration is halted.
+// The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove).
+// There is no guarantee as to what order the values will be iterated
+func (rb *Bitmap) Iterate(cb func(x uint32) bool) {
+	for i := 0; i < rb.highlowcontainer.size(); i++ {
+		hs := uint32(rb.highlowcontainer.getKeyAtIndex(i)) << 16
+		c := rb.highlowcontainer.getContainerAtIndex(i)
+
+		var shouldContinue bool
+		// This is hacky but it avoids allocations from invoking an interface method with a closure
+		switch t := c.(type) {
+		case *arrayContainer:
+			shouldContinue = t.iterate(func(x uint16) bool {
+				return cb(uint32(x) | hs)
+			})
+		case *runContainer16:
+			shouldContinue = t.iterate(func(x uint16) bool {
+				return cb(uint32(x) | hs)
+			})
+		case *bitmapContainer:
+			shouldContinue = t.iterate(func(x uint16) bool {
+				return cb(uint32(x) | hs)
+			})
+		}
+
+		if !shouldContinue {
+			break
+		}
+	}
+}
+
 // Iterator creates a new IntPeekable to iterate over the integers contained in the bitmap, in sorted order;
 // the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove).
 func (rb *Bitmap) Iterator() IntPeekable {
@@ -475,41 +507,72 @@ func (rb *Bitmap) Equals(o interface{}) bool {
 
 // AddOffset adds the value 'offset' to each and every value in a bitmap, generating a new bitmap in the process
 func AddOffset(x *Bitmap, offset uint32) (answer *Bitmap) {
-	containerOffset := highbits(offset)
-	inOffset := lowbits(offset)
+	return AddOffset64(x, int64(offset))
+}
+
+// AddOffset64 adds the value 'offset' to each and every value in a bitmap, generating a new bitmap in the process
+// If offset + element is outside of the range [0,2^32), that the element will be dropped
+func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) {
+	// we need "offset" to be a long because we want to support values
+	// between -0xFFFFFFFF up to +-0xFFFFFFFF
+	var containerOffset64 int64
+
+	if offset < 0 {
+		containerOffset64 = (offset - (1 << 16) + 1) / (1 << 16)
+	} else {
+		containerOffset64 = offset >> 16
+	}
+
+	if containerOffset64 >= (1<<16) || containerOffset64 <= -(1<<16) {
+		return New()
+	}
+
+	containerOffset := int32(containerOffset64)
+	inOffset := (uint16)(offset - containerOffset64*(1<<16))
+
 	if inOffset == 0 {
 		answer = x.Clone()
 		for pos := 0; pos < answer.highlowcontainer.size(); pos++ {
-			key := answer.highlowcontainer.getKeyAtIndex(pos)
+			key := int32(answer.highlowcontainer.getKeyAtIndex(pos))
 			key += containerOffset
-			answer.highlowcontainer.keys[pos] = key
+
+			if key >= 0 && key <= MaxUint16 {
+				answer.highlowcontainer.keys[pos] = uint16(key)
+			}
 		}
 	} else {
 		answer = New()
+
 		for pos := 0; pos < x.highlowcontainer.size(); pos++ {
-			key := x.highlowcontainer.getKeyAtIndex(pos)
+			key := int32(x.highlowcontainer.getKeyAtIndex(pos))
 			key += containerOffset
+
 			c := x.highlowcontainer.getContainerAtIndex(pos)
 			offsetted := c.addOffset(inOffset)
-			if offsetted[0].getCardinality() > 0 {
+
+			if offsetted[0].getCardinality() > 0 && (key >= 0 && key <= MaxUint16) {
 				curSize := answer.highlowcontainer.size()
-				lastkey := uint16(0)
+				lastkey := int32(0)
+
 				if curSize > 0 {
-					lastkey = answer.highlowcontainer.getKeyAtIndex(curSize - 1)
+					lastkey = int32(answer.highlowcontainer.getKeyAtIndex(curSize - 1))
 				}
+
 				if curSize > 0 && lastkey == key {
 					prev := answer.highlowcontainer.getContainerAtIndex(curSize - 1)
 					orrseult := prev.ior(offsetted[0])
 					answer.highlowcontainer.setContainerAtIndex(curSize-1, orrseult)
 				} else {
-					answer.highlowcontainer.appendContainer(key, offsetted[0], false)
+					answer.highlowcontainer.appendContainer(uint16(key), offsetted[0], false)
 				}
 			}
-			if offsetted[1].getCardinality() > 0 {
-				answer.highlowcontainer.appendContainer(key+1, offsetted[1], false)
+
+			if offsetted[1].getCardinality() > 0 && ((key+1) >= 0 && (key+1) <= MaxUint16) {
+				answer.highlowcontainer.appendContainer(uint16(key+1), offsetted[1], false)
 			}
 		}
 	}
+
 	return answer
 }
diff --git a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go
index d9d5edda73..3dddbffdbc 100644
--- a/vendor/github.com/RoaringBitmap/roaring/roaringarray.go
+++ b/vendor/github.com/RoaringBitmap/roaring/roaringarray.go
@@ -4,9 +4,10 @@ import (
 	"bytes"
 	"encoding/binary"
 	"fmt"
+	"io"
+
 	snappy "github.com/glycerine/go-unsnap-stream"
 	"github.com/tinylib/msgp/msgp"
-	"io"
 )
 
 //go:generate msgp -unexported
@@ -38,6 +39,7 @@ type container interface {
 	inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
 	xor(r container) container
 	getShortIterator() shortPeekable
+	iterate(cb func(x uint16) bool) bool
 	getReverseIterator() shortIterable
 	getManyIterator() manyIterable
 	contains(i uint16) bool
@@ -488,20 +490,15 @@ func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) {
 		nw += 2
 		binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1))
 		nw += 2
-
-		// compute isRun bitmap
-		var ir []byte
-
-		isRun := newBitmapContainer()
+		// compute isRun bitmap without temporary allocation
+		var runbitmapslice = buf[nw : nw+isRunSizeInBytes]
 		for i, c := range ra.containers {
 			switch c.(type) {
 			case *runContainer16:
-				isRun.iadd(uint16(i))
+				runbitmapslice[i/8] |= 1 << (uint(i) % 8)
 			}
 		}
-		// convert to little endian
-		ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes]
-		nw += copy(buf[nw:], ir)
+		nw += isRunSizeInBytes
 	} else {
 		binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer))
 		nw += 4
diff --git a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go
index cbffdaf24d..5a0f985f1d 100644
--- a/vendor/github.com/RoaringBitmap/roaring/runcontainer.go
+++ b/vendor/github.com/RoaringBitmap/roaring/runcontainer.go
@@ -1162,6 +1162,18 @@ func (rc *runContainer16) newRunIterator16() *runIterator16 {
 	return &runIterator16{rc: rc, curIndex: 0, curPosInIndex: 0}
 }
 
+func (rc *runContainer16) iterate(cb func(x uint16) bool) bool {
+	iterator := runIterator16{rc, 0, 0}
+
+	for iterator.hasNext() {
+		if !cb(iterator.next()) {
+			return false
+		}
+	}
+
+	return true
+}
+
 // hasNext returns false if calling next will panic. It
 // returns true when there is at least one more value
 // available in the iteration sequence.
diff --git a/vendor/github.com/RoaringBitmap/roaring/setutil.go b/vendor/github.com/RoaringBitmap/roaring/setutil.go
index 3e8c01dd1f..2fe8151411 100644
--- a/vendor/github.com/RoaringBitmap/roaring/setutil.go
+++ b/vendor/github.com/RoaringBitmap/roaring/setutil.go
@@ -14,6 +14,7 @@ func equal(a, b []uint16) bool {
 
 func difference(set1 []uint16, set2 []uint16, buffer []uint16) int {
 	if 0 == len(set2) {
+		buffer = buffer[:len(set1)]
 		for k := 0; k < len(set1); k++ {
 			buffer[k] = set1[k]
 		}
diff --git a/vendor/github.com/RoaringBitmap/roaring/util.go b/vendor/github.com/RoaringBitmap/roaring/util.go
index 3a9a47236b..6763033916 100644
--- a/vendor/github.com/RoaringBitmap/roaring/util.go
+++ b/vendor/github.com/RoaringBitmap/roaring/util.go
@@ -112,7 +112,7 @@ func highbits(x uint32) uint16 {
 	return uint16(x >> 16)
 }
 func lowbits(x uint32) uint16 {
-	return uint16(x & 0xFFFF)
+	return uint16(x & maxLowBit)
 }
 
 const maxLowBit = 0xFFFF
diff --git a/vendor/github.com/blevesearch/bleve/.travis.yml b/vendor/github.com/blevesearch/bleve/.travis.yml
index e00e7b9948..7b7297afe3 100644
--- a/vendor/github.com/blevesearch/bleve/.travis.yml
+++ b/vendor/github.com/blevesearch/bleve/.travis.yml
@@ -3,9 +3,9 @@ sudo: false
 language: go
 
 go:
-  - "1.10.x"
-  - "1.11.x"
   - "1.12.x"
+  - "1.13.x"
+  - "1.14.x"
 
 script:
   - go get golang.org/x/tools/cmd/cover
@@ -16,11 +16,7 @@ script:
   - go test -race -v $(go list ./... | grep -v vendor/)
   - go vet $(go list ./... | grep -v vendor/)
   - go test ./test -v -indexType scorch
-  - if [[ ${TRAVIS_GO_VERSION} =~ ^1\.10 ]]; then
-      echo "errcheck skipped for go version" $TRAVIS_GO_VERSION;
-    else
-      errcheck -ignorepkg fmt $(go list ./... | grep -v vendor/);
-    fi
+  - errcheck -ignorepkg fmt $(go list ./... | grep -v vendor/);
   - docs/project-code-coverage.sh
   - docs/build_children.sh
diff --git a/vendor/github.com/blevesearch/bleve/analysis/lang/en/stemmer_en_snowball.go b/vendor/github.com/blevesearch/bleve/analysis/lang/en/stemmer_en_snowball.go
new file mode 100644
index 0000000000..225bb0664d
--- /dev/null
+++ b/vendor/github.com/blevesearch/bleve/analysis/lang/en/stemmer_en_snowball.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2020 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// 		http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package en
+
+import (
+	"github.com/blevesearch/bleve/analysis"
+	"github.com/blevesearch/bleve/registry"
+
+	"github.com/blevesearch/snowballstem"
+	"github.com/blevesearch/snowballstem/english"
+)
+
+const SnowballStemmerName = "stemmer_en_snowball"
+
+type EnglishStemmerFilter struct {
+}
+
+func NewEnglishStemmerFilter() *EnglishStemmerFilter {
+	return &EnglishStemmerFilter{}
+}
+
+func (s *EnglishStemmerFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
+	for _, token := range input {
+		env := snowballstem.NewEnv(string(token.Term))
+		english.Stem(env)
+		token.Term = []byte(env.Current())
+	}
+	return input
+}
+
+func EnglishStemmerFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {
+	return NewEnglishStemmerFilter(), nil
+}
+
+func init() {
+	registry.RegisterTokenFilter(SnowballStemmerName, EnglishStemmerFilterConstructor)
+}
diff --git a/vendor/github.com/blevesearch/bleve/geo/geo.go b/vendor/github.com/blevesearch/bleve/geo/geo.go
index 583451e308..b18ace4337 100644
--- a/vendor/github.com/blevesearch/bleve/geo/geo.go
+++ b/vendor/github.com/blevesearch/bleve/geo/geo.go
@@ -33,14 +33,16 @@ var minLonRad = minLon * degreesToRadian
 var minLatRad = minLat * degreesToRadian
 var maxLonRad = maxLon * degreesToRadian
 var maxLatRad = maxLat * degreesToRadian
-var geoTolerance = 1E-6
+var geoTolerance = 1e-6
 var lonScale = float64((uint64(0x1)< uint64(po.PersisterNapUnderNumFiles) {
 		s.removeOldData()
-		numFilesOnDisk, _ = s.diskFileStats()
+		numFilesOnDisk, _, _ = s.diskFileStats(nil)
 	}
 
 	// Persister pause until the merger catches up to reduce the segment
@@ -305,7 +302,7 @@ OUTER:
 		// let the watchers proceed if they lag behind
 		persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers)
 
-		numFilesOnDisk, _ = s.diskFileStats()
+		numFilesOnDisk, _, _ = s.diskFileStats(nil)
 	}
 
 	return lastMergedEpoch, persistWatchers
@@ -360,13 +357,13 @@ var DefaultMinSegmentsForInMemoryMerge = 2
 func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) (
 	bool, error) {
 	// collect the in-memory zap segments (SegmentBase instances)
-	var sbs []*zap.SegmentBase
+	var sbs []segment.Segment
 	var sbsDrops []*roaring.Bitmap
 	var sbsIndexes []int
 
 	for i, segmentSnapshot := range snapshot.segment {
-		if sb, ok := segmentSnapshot.segment.(*zap.SegmentBase); ok {
-			sbs = append(sbs, sb)
+		if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); !ok {
+			sbs = append(sbs, segmentSnapshot.segment)
 			sbsDrops = append(sbsDrops, segmentSnapshot.deleted)
segmentSnapshot.deleted) sbsIndexes = append(sbsIndexes, i) } @@ -377,7 +374,7 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) ( } newSnapshot, newSegmentID, err := s.mergeSegmentBases( - snapshot, sbs, sbsDrops, sbsIndexes, DefaultChunkFactor) + snapshot, sbs, sbsDrops, sbsIndexes) if err != nil { return false, err } @@ -459,13 +456,13 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { if err != nil { return err } - err = metaBucket.Put([]byte("type"), []byte(zap.Type)) + err = metaBucket.Put(boltMetaDataSegmentTypeKey, []byte(s.segPlugin.Type())) if err != nil { return err } buf := make([]byte, binary.MaxVarintLen32) - binary.BigEndian.PutUint32(buf, zap.Version) - err = metaBucket.Put([]byte("version"), buf) + binary.BigEndian.PutUint32(buf, s.segPlugin.Version()) + err = metaBucket.Put(boltMetaDataSegmentVersionKey, buf) if err != nil { return err } @@ -494,11 +491,19 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { return err } switch seg := segmentSnapshot.segment.(type) { - case *zap.SegmentBase: + case segment.PersistedSegment: + path := seg.Path() + filename := strings.TrimPrefix(path, s.path+string(os.PathSeparator)) + err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) + if err != nil { + return err + } + filenames = append(filenames, filename) + case segment.UnpersistedSegment: // need to persist this to disk filename := zapFileName(segmentSnapshot.id) path := s.path + string(os.PathSeparator) + filename - err = zap.PersistSegmentBase(seg, path) + err = seg.Persist(path) if err != nil { return fmt.Errorf("error persisting segment: %v", err) } @@ -508,14 +513,7 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { return err } filenames = append(filenames, filename) - case *zap.Segment: - path := seg.Path() - filename := strings.TrimPrefix(path, s.path+string(os.PathSeparator)) - err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename)) - if err != nil { - return err - } - filenames = append(filenames, filename) + default: return fmt.Errorf("unknown segment type: %T", seg) } @@ -553,7 +551,7 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) { } }() for segmentID, path := range newSegmentPaths { - newSegments[segmentID], err = zap.Open(path) + newSegments[segmentID], err = s.segPlugin.Open(path) if err != nil { return fmt.Errorf("error opening new segment at %s, %v", path, err) } @@ -609,6 +607,8 @@ var boltPathKey = []byte{'p'} var boltDeletedKey = []byte{'d'} var boltInternalKey = []byte{'i'} var boltMetaDataKey = []byte{'m'} +var boltMetaDataSegmentTypeKey = []byte("type") +var boltMetaDataSegmentVersionKey = []byte("version") func (s *Scorch) loadFromBolt() error { return s.rootBolt.View(func(tx *bolt.Tx) error { @@ -693,6 +693,23 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) { refs: 1, creator: "loadSnapshot", } + // first we look for the meta-data bucket, this will tell us + // which segment type/version was used for this snapshot + // all operations for this scorch will use this type/version + metaBucket := snapshot.Bucket(boltMetaDataKey) + if metaBucket == nil { + _ = rv.DecRef() + return nil, fmt.Errorf("meta-data bucket missing") + } + segmentType := string(metaBucket.Get(boltMetaDataSegmentTypeKey)) + segmentVersion := binary.BigEndian.Uint32( + metaBucket.Get(boltMetaDataSegmentVersionKey)) + err := s.loadSegmentPlugin(segmentType, segmentVersion) + if err != nil { + _ = 
rv.DecRef() + return nil, fmt.Errorf( + "unable to load correct segment wrapper: %v", err) + } var running uint64 c := snapshot.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { @@ -737,7 +754,7 @@ func (s *Scorch) loadSegment(segmentBucket *bolt.Bucket) (*SegmentSnapshot, erro return nil, fmt.Errorf("segment path missing") } segmentPath := s.path + string(os.PathSeparator) + string(pathBytes) - segment, err := zap.Open(segmentPath) + segment, err := s.segPlugin.Open(segmentPath) if err != nil { return nil, fmt.Errorf("error opening bolt segment: %v", err) } diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/scorch.go b/vendor/github.com/blevesearch/bleve/index/scorch/scorch.go index 44a97d1ea6..80f9e3a797 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/scorch.go +++ b/vendor/github.com/blevesearch/bleve/index/scorch/scorch.go @@ -28,10 +28,9 @@ import ( "github.com/blevesearch/bleve/document" "github.com/blevesearch/bleve/index" "github.com/blevesearch/bleve/index/scorch/segment" - "github.com/blevesearch/bleve/index/scorch/segment/zap" "github.com/blevesearch/bleve/index/store" "github.com/blevesearch/bleve/registry" - bolt "github.com/etcd-io/bbolt" + bolt "go.etcd.io/bbolt" ) const Name = "scorch" @@ -67,7 +66,6 @@ type Scorch struct { persists chan *persistIntroduction merges chan *segmentMerge introducerNotifier chan *epochWatcher - revertToSnapshots chan *snapshotReversion persisterNotifier chan *epochWatcher rootBolt *bolt.DB asyncTasks sync.WaitGroup @@ -78,6 +76,8 @@ type Scorch struct { pauseLock sync.RWMutex pauseCount uint64 + + segPlugin segment.Plugin } type internalStats struct { @@ -101,7 +101,25 @@ func NewScorch(storeName string, nextSnapshotEpoch: 1, closeCh: make(chan struct{}), ineligibleForRemoval: map[string]bool{}, + segPlugin: defaultSegmentPlugin, } + + // check if the caller has requested a specific segment type/version + forcedSegmentVersion, ok := config["forceSegmentVersion"].(int) + if ok { + forcedSegmentType, ok2 := config["forceSegmentType"].(string) + if !ok2 { + return nil, fmt.Errorf( + "forceSegmentVersion set to %d, must also specify forceSegmentType", forcedSegmentVersion) + } + + err := rv.loadSegmentPlugin(forcedSegmentType, + uint32(forcedSegmentVersion)) + if err != nil { + return nil, err + } + } + rv.root = &IndexSnapshot{parent: rv, refs: 1, creator: "NewScorch"} ro, ok := config["read_only"].(bool) if ok { @@ -221,8 +239,8 @@ func (s *Scorch) openBolt() error { s.persists = make(chan *persistIntroduction) s.merges = make(chan *segmentMerge) s.introducerNotifier = make(chan *epochWatcher, 1) - s.revertToSnapshots = make(chan *snapshotReversion) s.persisterNotifier = make(chan *epochWatcher, 1) + s.closeCh = make(chan struct{}) if !s.readOnly && s.path != "" { err := s.removeOldZapFiles() // Before persister or merger create any new files. 
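The `forceSegmentType` and `forceSegmentVersion` config keys handled in `NewScorch` above let a caller pin a specific segment plugin instead of relying on `defaultSegmentPlugin`. A minimal sketch of how a caller might use them, not part of this patch: the index path and queue size are placeholder values, and `"zap"`/`11` assume the zap v11 plugin registered elsewhere in this change.

```
package main

import (
	"fmt"

	"github.com/blevesearch/bleve/index"
	"github.com/blevesearch/bleve/index/scorch"
)

func main() {
	// forceSegmentVersion must be an int and forceSegmentType a string,
	// per the type assertions in NewScorch; the path is a placeholder.
	config := map[string]interface{}{
		"path":                "/tmp/example.scorch",
		"forceSegmentType":    "zap", // assumed: type name reported by the vendored zap plugins
		"forceSegmentVersion": 11,
	}
	idx, err := scorch.NewScorch(scorch.Name, config, index.NewAnalysisQueue(4))
	if err != nil {
		// an unregistered type/version combination fails here
		fmt.Println("unable to load segment plugin:", err)
		return
	}
	_ = idx // idx.Open() would create files under the configured path
}
```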
@@ -263,7 +281,10 @@ func (s *Scorch) Close() (err error) { err = s.rootBolt.Close() s.rootLock.Lock() if s.root != nil { - _ = s.root.DecRef() + err2 := s.root.DecRef() + if err == nil { + err = err2 + } } s.root = nil s.rootLock.Unlock() @@ -349,7 +370,7 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) { var newSegment segment.Segment var bufBytes uint64 if len(analysisResults) > 0 { - newSegment, bufBytes, err = zap.AnalysisResultsToSegmentBase(analysisResults, DefaultChunkFactor) + newSegment, bufBytes, err = s.segPlugin.New(analysisResults) if err != nil { return err } @@ -466,8 +487,9 @@ func (s *Scorch) Stats() json.Marshaler { return &s.stats } -func (s *Scorch) diskFileStats() (uint64, uint64) { - var numFilesOnDisk, numBytesUsedDisk uint64 +func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64, + uint64, uint64) { + var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64 if s.path != "" { finfos, err := ioutil.ReadDir(s.path) if err == nil { @@ -475,24 +497,47 @@ func (s *Scorch) diskFileStats() (uint64, uint64) { if !finfo.IsDir() { numBytesUsedDisk += uint64(finfo.Size()) numFilesOnDisk++ + if rootSegmentPaths != nil { + fname := s.path + string(os.PathSeparator) + finfo.Name() + if _, fileAtRoot := rootSegmentPaths[fname]; fileAtRoot { + numBytesOnDiskByRoot += uint64(finfo.Size()) + } + } } } } } - return numFilesOnDisk, numBytesUsedDisk + // if no root files path given, then consider all disk files. + if rootSegmentPaths == nil { + return numFilesOnDisk, numBytesUsedDisk, numBytesUsedDisk + } + + return numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot +} + +func (s *Scorch) rootDiskSegmentsPaths() map[string]struct{} { + rv := make(map[string]struct{}, len(s.root.segment)) + for _, segmentSnapshot := range s.root.segment { + if seg, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok { + rv[seg.Path()] = struct{}{} + } + } + return rv } func (s *Scorch) StatsMap() map[string]interface{} { m := s.stats.ToMap() - numFilesOnDisk, numBytesUsedDisk := s.diskFileStats() + s.rootLock.RLock() + rootSegPaths := s.rootDiskSegmentsPaths() + m["CurFilesIneligibleForRemoval"] = uint64(len(s.ineligibleForRemoval)) + s.rootLock.RUnlock() + + numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot := s.diskFileStats(rootSegPaths) m["CurOnDiskBytes"] = numBytesUsedDisk m["CurOnDiskFiles"] = numFilesOnDisk - s.rootLock.RLock() - m["CurFilesIneligibleForRemoval"] = uint64(len(s.ineligibleForRemoval)) - s.rootLock.RUnlock() // TODO: consider one day removing these backwards compatible // names for apps using the old names m["updates"] = m["TotUpdates"] @@ -507,8 +552,11 @@ func (s *Scorch) StatsMap() map[string]interface{} { m["num_items_introduced"] = m["TotIntroducedItems"] m["num_items_persisted"] = m["TotPersistedItems"] m["num_recs_to_persist"] = m["TotItemsToPersist"] - m["num_bytes_used_disk"] = m["CurOnDiskBytes"] - m["num_files_on_disk"] = m["CurOnDiskFiles"] + // total disk bytes found in index directory inclusive of older snapshots + m["num_bytes_used_disk"] = numBytesUsedDisk + // total disk bytes by the latest root index, exclusive of older snapshots + m["num_bytes_used_disk_by_root"] = numBytesOnDiskByRoot + m["num_files_on_disk"] = numFilesOnDisk m["num_root_memorysegments"] = m["TotMemorySegmentsAtRoot"] m["num_root_filesegments"] = m["TotFileSegmentsAtRoot"] m["num_persister_nap_pause_completed"] = m["TotPersisterNapPauseCompleted"] diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/empty.go 
b/vendor/github.com/blevesearch/bleve/index/scorch/segment/empty.go index fdc407a747..340db73a67 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/empty.go +++ b/vendor/github.com/blevesearch/bleve/index/scorch/segment/empty.go @@ -105,10 +105,6 @@ func (e *EmptyDictionaryIterator) Contains(key []byte) (bool, error) { return false, nil } -func (e *EmptyPostingsIterator) Advance(uint64) (Posting, error) { - return nil, nil -} - type EmptyPostingsList struct{} func (e *EmptyPostingsList) Iterator(includeFreq, includeNorm, includeLocations bool, @@ -130,6 +126,10 @@ func (e *EmptyPostingsIterator) Next() (Posting, error) { return nil, nil } +func (e *EmptyPostingsIterator) Advance(uint64) (Posting, error) { + return nil, nil +} + func (e *EmptyPostingsIterator) Size() int { return 0 } diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/plugin.go b/vendor/github.com/blevesearch/bleve/index/scorch/segment/plugin.go new file mode 100644 index 0000000000..d8aaa0b6d1 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/index/scorch/segment/plugin.go @@ -0,0 +1,58 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package segment + +import ( + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/bleve/index" +) + +// Plugin represents the essential functions required by a package to plug in +// its segment implementation +type Plugin interface { + + // Type is the name for this segment plugin + Type() string + + // Version is a numeric value identifying a specific version of this type. + // When incompatible changes are made to a particular type of plugin, the + // version must be incremented. + Version() uint32 + + // New takes a set of AnalysisResults and turns them into a new Segment + New(results []*index.AnalysisResult) (Segment, uint64, error) + + // Open attempts to open the file at the specified path and + // return the corresponding Segment + Open(path string) (Segment, error) + + // Merge takes a set of Segments, and creates a new segment on disk at + // the specified path. + // Drops is a set of bitmaps (one for each segment) indicating which + // documents can be dropped from the segments during the merge. + // If the closeCh channel is closed, Merge will cease doing work at + // the next opportunity, and return an error (closed). + // StatsReporter can optionally be provided, in which case progress + // made during the merge is reported while operation continues. + // Returns: + // A slice of new document numbers (one for each input segment), + // this allows the caller to know a particular document's new + // document number in the newly merged segment. + // The number of bytes written to the new segment file. + // An error, if any occurred.
+ Merge(segments []Segment, drops []*roaring.Bitmap, path string, + closeCh chan struct{}, s StatsReporter) ( + [][]uint64, uint64, error) +} diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/segment.go b/vendor/github.com/blevesearch/bleve/index/scorch/segment/segment.go index 34c2bc2048..ddd0d09102 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/segment.go +++ b/vendor/github.com/blevesearch/bleve/index/scorch/segment/segment.go @@ -50,6 +50,16 @@ type Segment interface { DecRef() error } +type UnpersistedSegment interface { + Segment + Persist(path string) error +} + +type PersistedSegment interface { + Segment + Path() string +} + type TermDictionary interface { PostingsList(term []byte, except *roaring.Bitmap, prealloc PostingsList) (PostingsList, error) @@ -96,6 +106,12 @@ type PostingsIterator interface { Size() int } +type OptimizablePostingsIterator interface { + ActualBitmap() *roaring.Bitmap + DocNum1Hit() (uint64, bool) + ReplaceActual(*roaring.Bitmap) +} + type Posting interface { Number() uint64 diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/unadorned.go b/vendor/github.com/blevesearch/bleve/index/scorch/segment/unadorned.go new file mode 100644 index 0000000000..9a4d6c76c9 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/index/scorch/segment/unadorned.go @@ -0,0 +1,148 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package segment + +import ( + "github.com/RoaringBitmap/roaring" + "math" + "reflect" +) + +var reflectStaticSizeUnadornedPostingsIteratorBitmap int +var reflectStaticSizeUnadornedPostingsIterator1Hit int +var reflectStaticSizeUnadornedPosting int + + +func init() { + var pib UnadornedPostingsIteratorBitmap + reflectStaticSizeUnadornedPostingsIteratorBitmap = int(reflect.TypeOf(pib).Size()) + var pi1h UnadornedPostingsIterator1Hit + reflectStaticSizeUnadornedPostingsIterator1Hit = int(reflect.TypeOf(pi1h).Size()) + var up UnadornedPosting + reflectStaticSizeUnadornedPosting = int(reflect.TypeOf(up).Size()) +} + +type UnadornedPostingsIteratorBitmap struct{ + actual roaring.IntPeekable + actualBM *roaring.Bitmap +} + +func (i *UnadornedPostingsIteratorBitmap) Next() (Posting, error) { + return i.nextAtOrAfter(0) +} + +func (i *UnadornedPostingsIteratorBitmap) Advance(docNum uint64) (Posting, error) { + return i.nextAtOrAfter(docNum) +} + +func (i *UnadornedPostingsIteratorBitmap) nextAtOrAfter(atOrAfter uint64) (Posting, error) { + docNum, exists := i.nextDocNumAtOrAfter(atOrAfter) + if !exists { + return nil, nil + } + return UnadornedPosting(docNum), nil +} + +func (i *UnadornedPostingsIteratorBitmap) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) { + if i.actual == nil || !i.actual.HasNext() { + return 0, false + } + i.actual.AdvanceIfNeeded(uint32(atOrAfter)) + + if !i.actual.HasNext() { + return 0, false // couldn't find anything + } + + return uint64(i.actual.Next()), true +} + +func (i *UnadornedPostingsIteratorBitmap) Size() int { + return reflectStaticSizeUnadornedPostingsIteratorBitmap +} + +func NewUnadornedPostingsIteratorFromBitmap(bm *roaring.Bitmap) PostingsIterator { + return &UnadornedPostingsIteratorBitmap{ + actualBM: bm, + actual: bm.Iterator(), + } +} + +const docNum1HitFinished = math.MaxUint64 + +type UnadornedPostingsIterator1Hit struct{ + docNum uint64 +} + +func (i *UnadornedPostingsIterator1Hit) Next() (Posting, error) { + return i.nextAtOrAfter(0) +} + +func (i *UnadornedPostingsIterator1Hit) Advance(docNum uint64) (Posting, error) { + return i.nextAtOrAfter(docNum) +} + +func (i *UnadornedPostingsIterator1Hit) nextAtOrAfter(atOrAfter uint64) (Posting, error) { + docNum, exists := i.nextDocNumAtOrAfter(atOrAfter) + if !exists { + return nil, nil + } + return UnadornedPosting(docNum), nil +} + +func (i *UnadornedPostingsIterator1Hit) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) { + if i.docNum == docNum1HitFinished { + return 0, false + } + if i.docNum < atOrAfter { + // advanced past our 1-hit + i.docNum = docNum1HitFinished // consume our 1-hit docNum + return 0, false + } + docNum := i.docNum + i.docNum = docNum1HitFinished // consume our 1-hit docNum + return docNum, true +} + +func (i *UnadornedPostingsIterator1Hit) Size() int { + return reflectStaticSizeUnadornedPostingsIterator1Hit +} + +func NewUnadornedPostingsIteratorFrom1Hit(docNum1Hit uint64) PostingsIterator { + return &UnadornedPostingsIterator1Hit{ + docNum1Hit, + } +} + +type UnadornedPosting uint64 + +func (p UnadornedPosting) Number() uint64 { + return uint64(p) +} + +func (p UnadornedPosting) Frequency() uint64 { + return 0 +} + +func (p UnadornedPosting) Norm() float64 { + return 0 +} + +func (p UnadornedPosting) Locations() []Location { + return nil +} + +func (p UnadornedPosting) Size() int { + return reflectStaticSizeUnadornedPosting +} \ No newline at end of file diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment_plugin.go 
b/vendor/github.com/blevesearch/bleve/index/scorch/segment_plugin.go new file mode 100644 index 0000000000..01eda7fbd5 --- /dev/null +++ b/vendor/github.com/blevesearch/bleve/index/scorch/segment_plugin.go @@ -0,0 +1,77 @@ +// Copyright (c) 2019 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scorch + +import ( + "fmt" + + "github.com/blevesearch/bleve/index/scorch/segment" + + zapv11 "github.com/blevesearch/zap/v11" + zapv12 "github.com/blevesearch/zap/v12" +) + +var supportedSegmentPlugins map[string]map[uint32]segment.Plugin +var defaultSegmentPlugin segment.Plugin + +func init() { + ResetPlugins() + RegisterPlugin(zapv12.Plugin(), false) + RegisterPlugin(zapv11.Plugin(), true) +} + +func ResetPlugins() { + supportedSegmentPlugins = map[string]map[uint32]segment.Plugin{} +} + +func RegisterPlugin(plugin segment.Plugin, makeDefault bool) { + if _, ok := supportedSegmentPlugins[plugin.Type()]; !ok { + supportedSegmentPlugins[plugin.Type()] = map[uint32]segment.Plugin{} + } + supportedSegmentPlugins[plugin.Type()][plugin.Version()] = plugin + if makeDefault { + defaultSegmentPlugin = plugin + } +} + +func SupportedSegmentTypes() (rv []string) { + for k := range supportedSegmentPlugins { + rv = append(rv, k) + } + return +} + +func SupportedSegmentTypeVersions(typ string) (rv []uint32) { + for k := range supportedSegmentPlugins[typ] { + rv = append(rv, k) + } + return rv +} + +func (s *Scorch) loadSegmentPlugin(forcedSegmentType string, + forcedSegmentVersion uint32) error { + if versions, ok := supportedSegmentPlugins[forcedSegmentType]; ok { + if segPlugin, ok := versions[uint32(forcedSegmentVersion)]; ok { + s.segPlugin = segPlugin + return nil + } + return fmt.Errorf( + "unsupported version %d for segment type: %s, supported: %v", + forcedSegmentVersion, forcedSegmentType, + SupportedSegmentTypeVersions(forcedSegmentType)) + } + return fmt.Errorf("unsupported segment type: %s, supported: %v", + forcedSegmentType, SupportedSegmentTypes()) +} diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/snapshot_rollback.go b/vendor/github.com/blevesearch/bleve/index/scorch/snapshot_rollback.go index 470868d0eb..7cc87bdea0 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/snapshot_rollback.go +++ b/vendor/github.com/blevesearch/bleve/index/scorch/snapshot_rollback.go @@ -17,9 +17,10 @@ package scorch import ( "fmt" "log" + "os" "github.com/blevesearch/bleve/index/scorch/segment" - bolt "github.com/etcd-io/bbolt" + bolt "go.etcd.io/bbolt" ) type RollbackPoint struct { @@ -34,13 +35,22 @@ func (r *RollbackPoint) GetInternal(key []byte) []byte { // RollbackPoints returns an array of rollback points available for // the application to rollback to, with more recent rollback points // (higher epochs) coming first. 
-func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) { - if s.rootBolt == nil { - return nil, fmt.Errorf("RollbackPoints: root is nil") +func RollbackPoints(path string) ([]*RollbackPoint, error) { + if len(path) == 0 { + return nil, fmt.Errorf("RollbackPoints: invalid path") + } + + rootBoltPath := path + string(os.PathSeparator) + "root.bolt" + rootBoltOpt := &bolt.Options{ + ReadOnly: true, + } + rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt) + if err != nil || rootBolt == nil { + return nil, err } // start a read-only bolt transaction - tx, err := s.rootBolt.Begin(false) + tx, err := rootBolt.Begin(false) if err != nil { return nil, fmt.Errorf("RollbackPoints: failed to start" + " read-only transaction") @@ -49,6 +59,7 @@ func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) { // read-only bolt transactions to be rolled back defer func() { _ = tx.Rollback() + _ = rootBolt.Close() }() snapshots := tx.Bucket(boltSnapshotsBucket) @@ -105,69 +116,98 @@ func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) { return rollbackPoints, nil } -// Rollback atomically and durably (if unsafeBatch is unset) brings -// the store back to the point in time as represented by the -// RollbackPoint. Rollback() should only be passed a RollbackPoint -// that came from the same store using the RollbackPoints() API. -func (s *Scorch) Rollback(to *RollbackPoint) error { +// Rollback atomically and durably brings the store back to the point +// in time as represented by the RollbackPoint. +// Rollback() should only be passed a RollbackPoint that came from the +// same store using the RollbackPoints() API along with the index path. +func Rollback(path string, to *RollbackPoint) error { if to == nil { return fmt.Errorf("Rollback: RollbackPoint is nil") } - - if s.rootBolt == nil { - return fmt.Errorf("Rollback: root is nil") + if len(path) == 0 { + return fmt.Errorf("Rollback: index path is empty") } - revert := &snapshotReversion{} + rootBoltPath := path + string(os.PathSeparator) + "root.bolt" + rootBoltOpt := &bolt.Options{ + ReadOnly: false, + } + rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt) + if err != nil || rootBolt == nil { + return err + } + defer func() { + err1 := rootBolt.Close() + if err1 != nil && err == nil { + err = err1 + } + }() - s.rootLock.Lock() - - err := s.rootBolt.View(func(tx *bolt.Tx) error { + // pick all the younger persisted epochs in bolt store + // including the target one. 
+ var found bool + var eligibleEpochs []uint64 + err = rootBolt.View(func(tx *bolt.Tx) error { snapshots := tx.Bucket(boltSnapshotsBucket) if snapshots == nil { - return fmt.Errorf("Rollback: no snapshots available") + return nil } - - pos := segment.EncodeUvarintAscending(nil, to.epoch) - - snapshot := snapshots.Bucket(pos) - if snapshot == nil { - return fmt.Errorf("Rollback: snapshot not found") + sc := snapshots.Cursor() + for sk, _ := sc.Last(); sk != nil && !found; sk, _ = sc.Prev() { + _, snapshotEpoch, err := segment.DecodeUvarintAscending(sk) + if err != nil { + continue + } + if snapshotEpoch == to.epoch { + found = true + } + eligibleEpochs = append(eligibleEpochs, snapshotEpoch) } - - indexSnapshot, err := s.loadSnapshot(snapshot) - if err != nil { - return fmt.Errorf("Rollback: unable to load snapshot: %v", err) - } - - // add segments referenced by loaded index snapshot to the - // ineligibleForRemoval map - for _, segSnap := range indexSnapshot.segment { - filename := zapFileName(segSnap.id) - s.ineligibleForRemoval[filename] = true - } - - revert.snapshot = indexSnapshot - revert.applied = make(chan error) - revert.persisted = make(chan error) - return nil }) - s.rootLock.Unlock() + if len(eligibleEpochs) == 0 { + return fmt.Errorf("Rollback: no persisted epochs found in bolt") + } + if !found { + return fmt.Errorf("Rollback: target epoch %d not found in bolt", to.epoch) + } + // start a write transaction + tx, err := rootBolt.Begin(true) if err != nil { return err } - // introduce the reversion - s.revertToSnapshots <- revert + defer func() { + if err == nil { + err = tx.Commit() + } else { + _ = tx.Rollback() + } + if err == nil { + err = rootBolt.Sync() + } + }() - // block until this snapshot is applied - err = <-revert.applied - if err != nil { - return fmt.Errorf("Rollback: failed with err: %v", err) + snapshots := tx.Bucket(boltSnapshotsBucket) + if snapshots == nil { + return nil + } + for _, epoch := range eligibleEpochs { + k := segment.EncodeUvarintAscending(nil, epoch) + if err != nil { + continue + } + if epoch == to.epoch { + // return here as it already processed until the given epoch + return nil + } + err = snapshots.DeleteBucket(k) + if err == bolt.ErrBucketNotFound { + err = nil + } } - return <-revert.persisted + return err } diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/stats.go b/vendor/github.com/blevesearch/bleve/index/scorch/stats.go index 6549fddf51..e638362a71 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/stats.go +++ b/vendor/github.com/blevesearch/bleve/index/scorch/stats.go @@ -98,10 +98,12 @@ type Stats struct { TotFileSegmentsAtRoot uint64 TotFileMergeWrittenBytes uint64 - TotFileMergeZapBeg uint64 - TotFileMergeZapEnd uint64 - TotFileMergeZapTime uint64 - MaxFileMergeZapTime uint64 + TotFileMergeZapBeg uint64 + TotFileMergeZapEnd uint64 + TotFileMergeZapTime uint64 + MaxFileMergeZapTime uint64 + TotFileMergeZapIntroductionTime uint64 + MaxFileMergeZapIntroductionTime uint64 TotFileMergeIntroductions uint64 TotFileMergeIntroductionsDone uint64 diff --git a/vendor/github.com/blevesearch/bleve/index/store/boltdb/iterator.go b/vendor/github.com/blevesearch/bleve/index/store/boltdb/iterator.go index 4b5019f1f2..cf4da87c35 100644 --- a/vendor/github.com/blevesearch/bleve/index/store/boltdb/iterator.go +++ b/vendor/github.com/blevesearch/bleve/index/store/boltdb/iterator.go @@ -17,7 +17,7 @@ package boltdb import ( "bytes" - bolt "github.com/etcd-io/bbolt" + bolt "go.etcd.io/bbolt" ) type Iterator struct { diff --git 
a/vendor/github.com/blevesearch/bleve/index/store/boltdb/reader.go b/vendor/github.com/blevesearch/bleve/index/store/boltdb/reader.go index 4cd94183c6..7977ebbe56 100644 --- a/vendor/github.com/blevesearch/bleve/index/store/boltdb/reader.go +++ b/vendor/github.com/blevesearch/bleve/index/store/boltdb/reader.go @@ -16,7 +16,7 @@ package boltdb import ( "github.com/blevesearch/bleve/index/store" - bolt "github.com/etcd-io/bbolt" + bolt "go.etcd.io/bbolt" ) type Reader struct { diff --git a/vendor/github.com/blevesearch/bleve/index/store/boltdb/store.go b/vendor/github.com/blevesearch/bleve/index/store/boltdb/store.go index 56613d5315..3c749693c0 100644 --- a/vendor/github.com/blevesearch/bleve/index/store/boltdb/store.go +++ b/vendor/github.com/blevesearch/bleve/index/store/boltdb/store.go @@ -30,7 +30,7 @@ import ( "github.com/blevesearch/bleve/index/store" "github.com/blevesearch/bleve/registry" - bolt "github.com/etcd-io/bbolt" + bolt "go.etcd.io/bbolt" ) const ( diff --git a/vendor/github.com/blevesearch/bleve/index/upsidedown/upsidedown.go b/vendor/github.com/blevesearch/bleve/index/upsidedown/upsidedown.go index 24f5aae949..8e915c6ad5 100644 --- a/vendor/github.com/blevesearch/bleve/index/upsidedown/upsidedown.go +++ b/vendor/github.com/blevesearch/bleve/index/upsidedown/upsidedown.go @@ -820,7 +820,8 @@ func (udc *UpsideDownCouch) Batch(batch *index.Batch) (err error) { if numUpdates > 0 { go func() { - for _, doc := range batch.IndexOps { + for k := range batch.IndexOps { + doc := batch.IndexOps[k] if doc != nil { aw := index.NewAnalysisWork(udc, doc, resultChan) // put the work on the queue diff --git a/vendor/github.com/blevesearch/bleve/search/collector/topn.go b/vendor/github.com/blevesearch/bleve/search/collector/topn.go index a027a12c22..8d4afb63a0 100644 --- a/vendor/github.com/blevesearch/bleve/search/collector/topn.go +++ b/vendor/github.com/blevesearch/bleve/search/collector/topn.go @@ -17,6 +17,7 @@ package collector import ( "context" "reflect" + "strconv" "time" "github.com/blevesearch/bleve/index" @@ -90,6 +91,18 @@ func NewTopNCollectorAfter(size int, sort search.SortOrder, after []string) *Top rv.searchAfter = &search.DocumentMatch{ Sort: after, } + + for pos, ss := range sort { + if ss.RequiresDocID() { + rv.searchAfter.ID = after[pos] + } + if ss.RequiresScoring() { + if score, err := strconv.ParseFloat(after[pos], 64); err == nil { + rv.searchAfter.Score = score + } + } + } + return rv } diff --git a/vendor/github.com/blevesearch/bleve/search/highlight/fragmenter/simple/simple.go b/vendor/github.com/blevesearch/bleve/search/highlight/fragmenter/simple/simple.go index 6f6ecedf55..9c63f7fb6c 100644 --- a/vendor/github.com/blevesearch/bleve/search/highlight/fragmenter/simple/simple.go +++ b/vendor/github.com/blevesearch/bleve/search/highlight/fragmenter/simple/simple.go @@ -58,6 +58,11 @@ OUTER: // push back towards beginning // without cross maxbegin for start > 0 && used < s.fragmentSize { + if start > len(orig) { + // bail if out of bounds, possibly due to token replacement + // e.g with a regexp replacement + continue OUTER + } r, size := utf8.DecodeLastRune(orig[0:start]) if r == utf8.RuneError { continue OUTER // bail diff --git a/vendor/github.com/blevesearch/bleve/search/searcher/search_geoboundingbox.go b/vendor/github.com/blevesearch/bleve/search/searcher/search_geoboundingbox.go index 38cb6467fb..c4b8af9270 100644 --- a/vendor/github.com/blevesearch/bleve/search/searcher/search_geoboundingbox.go +++ 
b/vendor/github.com/blevesearch/bleve/search/searcher/search_geoboundingbox.go @@ -224,7 +224,8 @@ func ComputeGeoRange(term uint64, shift uint, func buildRectFilter(dvReader index.DocValueReader, field string, minLon, minLat, maxLon, maxLat float64) FilterFunc { return func(d *search.DocumentMatch) bool { - var lon, lat float64 + // check geo matches against all numeric type terms indexed + var lons, lats []float64 var found bool err := dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { // only consider the values which are shifted 0 @@ -234,15 +235,19 @@ func buildRectFilter(dvReader index.DocValueReader, field string, var i64 int64 i64, err = prefixCoded.Int64() if err == nil { - lon = geo.MortonUnhashLon(uint64(i64)) - lat = geo.MortonUnhashLat(uint64(i64)) + lons = append(lons, geo.MortonUnhashLon(uint64(i64))) + lats = append(lats, geo.MortonUnhashLat(uint64(i64))) found = true } } }) if err == nil && found { - return geo.BoundingBoxContains(lon, lat, - minLon, minLat, maxLon, maxLat) + for i := range lons { + if geo.BoundingBoxContains(lons[i], lats[i], + minLon, minLat, maxLon, maxLat) { + return true + } + } } return false } diff --git a/vendor/github.com/blevesearch/bleve/search/searcher/search_geopointdistance.go b/vendor/github.com/blevesearch/bleve/search/searcher/search_geopointdistance.go index b01ae6a0af..b6f2932445 100644 --- a/vendor/github.com/blevesearch/bleve/search/searcher/search_geopointdistance.go +++ b/vendor/github.com/blevesearch/bleve/search/searcher/search_geopointdistance.go @@ -83,7 +83,7 @@ func boxSearcher(indexReader index.IndexReader, return boxSearcher, nil } - // build geoboundinggox searcher for that bounding box + // build geoboundingbox searcher for that bounding box boxSearcher, err := NewGeoBoundingBoxSearcher(indexReader, topLeftLon, bottomRightLat, bottomRightLon, topLeftLat, field, boost, options, checkBoundaries) @@ -96,7 +96,8 @@ func boxSearcher(indexReader index.IndexReader, func buildDistFilter(dvReader index.DocValueReader, field string, centerLon, centerLat, maxDist float64) FilterFunc { return func(d *search.DocumentMatch) bool { - var lon, lat float64 + // check geo matches against all numeric type terms indexed + var lons, lats []float64 var found bool err := dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { @@ -106,16 +107,18 @@ func buildDistFilter(dvReader index.DocValueReader, field string, if err == nil && shift == 0 { i64, err := prefixCoded.Int64() if err == nil { - lon = geo.MortonUnhashLon(uint64(i64)) - lat = geo.MortonUnhashLat(uint64(i64)) + lons = append(lons, geo.MortonUnhashLon(uint64(i64))) + lats = append(lats, geo.MortonUnhashLat(uint64(i64))) found = true } } }) if err == nil && found { - dist := geo.Haversin(lon, lat, centerLon, centerLat) - if dist <= maxDist/1000 { - return true + for i := range lons { + dist := geo.Haversin(lons[i], lats[i], centerLon, centerLat) + if dist <= maxDist/1000 { + return true + } } } return false diff --git a/vendor/github.com/blevesearch/bleve/search/searcher/search_geopolygon.go b/vendor/github.com/blevesearch/bleve/search/searcher/search_geopolygon.go index 3bb47519d0..5f16aa8d26 100644 --- a/vendor/github.com/blevesearch/bleve/search/searcher/search_geopolygon.go +++ b/vendor/github.com/blevesearch/bleve/search/searcher/search_geopolygon.go @@ -15,6 +15,7 @@ package searcher import ( + "fmt" "github.com/blevesearch/bleve/geo" "github.com/blevesearch/bleve/index" "github.com/blevesearch/bleve/numeric" @@ -26,6 +27,10 @@ func 
NewGeoBoundedPolygonSearcher(indexReader index.IndexReader, polygon []geo.Point, field string, boost float64, options search.SearcherOptions) (search.Searcher, error) { + if len(polygon) < 3 { + return nil, fmt.Errorf("Too few points specified for the polygon boundary") + } + // compute the bounding box enclosing the polygon topLeftLon, topLeftLat, bottomRightLon, bottomRightLat, err := geo.BoundingRectangleForPolygon(polygon) @@ -63,7 +68,8 @@ func almostEqual(a, b float64) bool { func buildPolygonFilter(dvReader index.DocValueReader, field string, polygon []geo.Point) FilterFunc { return func(d *search.DocumentMatch) bool { - var lon, lat float64 + // check geo matches against all numeric type terms indexed + var lons, lats []float64 var found bool err := dvReader.VisitDocValues(d.IndexInternalID, func(field string, term []byte) { @@ -73,8 +79,8 @@ func buildPolygonFilter(dvReader index.DocValueReader, field string, if err == nil && shift == 0 { i64, err := prefixCoded.Int64() if err == nil { - lon = geo.MortonUnhashLon(uint64(i64)) - lat = geo.MortonUnhashLat(uint64(i64)) + lons = append(lons, geo.MortonUnhashLon(uint64(i64))) + lats = append(lats, geo.MortonUnhashLat(uint64(i64))) found = true } } @@ -84,26 +90,36 @@ func buildPolygonFilter(dvReader index.DocValueReader, field string, // the polygon. ie it might fail for certain points on the polygon boundaries. if err == nil && found { nVertices := len(polygon) - var inside bool - // check for a direct vertex match - if almostEqual(polygon[0].Lat, lat) && - almostEqual(polygon[0].Lon, lon) { - return true + if len(polygon) < 3 { + return false + } + rayIntersectsSegment := func(point, a, b geo.Point) bool { + return (a.Lat > point.Lat) != (b.Lat > point.Lat) && + point.Lon < (b.Lon-a.Lon)*(point.Lat-a.Lat)/(b.Lat-a.Lat)+a.Lon } - for i := 1; i < nVertices; i++ { - if almostEqual(polygon[i].Lat, lat) && - almostEqual(polygon[i].Lon, lon) { + for i := range lons { + pt := geo.Point{Lon: lons[i], Lat: lats[i]} + inside := rayIntersectsSegment(pt, polygon[len(polygon)-1], polygon[0]) + // check for a direct vertex match + if almostEqual(polygon[0].Lat, lats[i]) && + almostEqual(polygon[0].Lon, lons[i]) { return true } - if (polygon[i].Lat > lat) != (polygon[i-1].Lat > lat) && - lon < (polygon[i-1].Lon-polygon[i].Lon)*(lat-polygon[i].Lat)/ - (polygon[i-1].Lat-polygon[i].Lat)+polygon[i].Lon { - inside = !inside + + for j := 1; j < nVertices; j++ { + if almostEqual(polygon[j].Lat, lats[i]) && + almostEqual(polygon[j].Lon, lons[i]) { + return true + } + if rayIntersectsSegment(pt, polygon[j-1], polygon[j]) { + inside = !inside + } + } + if inside { + return true } } - return inside - } return false } diff --git a/vendor/github.com/blevesearch/go-porterstemmer/go.mod b/vendor/github.com/blevesearch/go-porterstemmer/go.mod new file mode 100644 index 0000000000..d620295d5d --- /dev/null +++ b/vendor/github.com/blevesearch/go-porterstemmer/go.mod @@ -0,0 +1,3 @@ +module github.com/blevesearch/go-porterstemmer + +go 1.13 diff --git a/vendor/github.com/edsrzf/mmap-go/.gitignore b/vendor/github.com/blevesearch/mmap-go/.gitignore similarity index 78% rename from vendor/github.com/edsrzf/mmap-go/.gitignore rename to vendor/github.com/blevesearch/mmap-go/.gitignore index 9aa02c1ed3..0c0a5e4916 100644 --- a/vendor/github.com/edsrzf/mmap-go/.gitignore +++ b/vendor/github.com/blevesearch/mmap-go/.gitignore @@ -6,3 +6,5 @@ _obj _test testdata +/.idea +*.iml \ No newline at end of file diff --git a/vendor/github.com/blevesearch/mmap-go/.travis.yml 
b/vendor/github.com/blevesearch/mmap-go/.travis.yml new file mode 100644 index 0000000000..169eb1f354 --- /dev/null +++ b/vendor/github.com/blevesearch/mmap-go/.travis.yml @@ -0,0 +1,16 @@ +language: go +os: + - linux + - osx + - windows +go: + - 1.11.4 +env: + global: + - GO111MODULE=on +install: + - go mod download + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out -bench . -cpu 1,4 + - '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN || true' diff --git a/vendor/github.com/edsrzf/mmap-go/LICENSE b/vendor/github.com/blevesearch/mmap-go/LICENSE similarity index 100% rename from vendor/github.com/edsrzf/mmap-go/LICENSE rename to vendor/github.com/blevesearch/mmap-go/LICENSE diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/blevesearch/mmap-go/README.md similarity index 100% rename from vendor/github.com/edsrzf/mmap-go/README.md rename to vendor/github.com/blevesearch/mmap-go/README.md diff --git a/vendor/github.com/blevesearch/mmap-go/go.mod b/vendor/github.com/blevesearch/mmap-go/go.mod new file mode 100644 index 0000000000..b7f0265734 --- /dev/null +++ b/vendor/github.com/blevesearch/mmap-go/go.mod @@ -0,0 +1,3 @@ +module github.com/blevesearch/mmap-go + +require golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6 diff --git a/vendor/github.com/blevesearch/mmap-go/go.sum b/vendor/github.com/blevesearch/mmap-go/go.sum new file mode 100644 index 0000000000..db2bff5e0f --- /dev/null +++ b/vendor/github.com/blevesearch/mmap-go/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6 h1:IcgEB62HYgAhX0Nd/QrVgZlxlcyxbGQHElLUhW2X4Fo= +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/edsrzf/mmap-go/mmap.go b/vendor/github.com/blevesearch/mmap-go/mmap.go similarity index 100% rename from vendor/github.com/edsrzf/mmap-go/mmap.go rename to vendor/github.com/blevesearch/mmap-go/mmap.go diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_unix.go b/vendor/github.com/blevesearch/mmap-go/mmap_unix.go similarity index 100% rename from vendor/github.com/edsrzf/mmap-go/mmap_unix.go rename to vendor/github.com/blevesearch/mmap-go/mmap_unix.go diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go b/vendor/github.com/blevesearch/mmap-go/mmap_windows.go similarity index 91% rename from vendor/github.com/edsrzf/mmap-go/mmap_windows.go rename to vendor/github.com/blevesearch/mmap-go/mmap_windows.go index 7910da2577..631b3825f9 100644 --- a/vendor/github.com/edsrzf/mmap-go/mmap_windows.go +++ b/vendor/github.com/blevesearch/mmap-go/mmap_windows.go @@ -22,8 +22,9 @@ import ( // We keep this map so that we can get back the original handle from the memory address. 
type addrinfo struct { - file windows.Handle - mapview windows.Handle + file windows.Handle + mapview windows.Handle + writable bool } var handleLock sync.Mutex @@ -32,13 +33,16 @@ var handleMap = map[uintptr]*addrinfo{} func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { flProtect := uint32(windows.PAGE_READONLY) dwDesiredAccess := uint32(windows.FILE_MAP_READ) + writable := false switch { case prot&COPY != 0: flProtect = windows.PAGE_WRITECOPY dwDesiredAccess = windows.FILE_MAP_COPY + writable = true case prot&RDWR != 0: flProtect = windows.PAGE_READWRITE dwDesiredAccess = windows.FILE_MAP_WRITE + writable = true } if prot&EXEC != 0 { flProtect <<= 4 @@ -67,8 +71,9 @@ func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) { } handleLock.Lock() handleMap[addr] = &addrinfo{ - file: windows.Handle(hfile), - mapview: h, + file: windows.Handle(hfile), + mapview: h, + writable: writable, } handleLock.Unlock() @@ -96,8 +101,13 @@ func (m MMap) flush() error { return errors.New("unknown base address") } - errno = windows.FlushFileBuffers(handle.file) - return os.NewSyscallError("FlushFileBuffers", errno) + if handle.writable { + if err := windows.FlushFileBuffers(handle.file); err != nil { + return os.NewSyscallError("FlushFileBuffers", err) + } + } + + return nil } func (m MMap) lock() error { diff --git a/vendor/github.com/blevesearch/segment/go.mod b/vendor/github.com/blevesearch/segment/go.mod new file mode 100644 index 0000000000..2f65976564 --- /dev/null +++ b/vendor/github.com/blevesearch/segment/go.mod @@ -0,0 +1,3 @@ +module github.com/blevesearch/segment + +go 1.13 diff --git a/vendor/github.com/blevesearch/snowballstem/COPYING b/vendor/github.com/blevesearch/snowballstem/COPYING new file mode 100644 index 0000000000..f36607f9e9 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/COPYING @@ -0,0 +1,29 @@ +Copyright (c) 2001, Dr Martin Porter +Copyright (c) 2004,2005, Richard Boulton +Copyright (c) 2013, Yoshiki Shibukawa +Copyright (c) 2006,2007,2009,2010,2011,2014-2019, Olly Betts +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + 3. Neither the name of the Snowball project nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
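The README that follows demonstrates the raw snowballstem API; on the bleve side, the new `stemmer_en_snowball` token filter added earlier in this patch wraps the same English stemmer. A minimal sketch of driving that filter directly, not part of this patch, with illustrative token values:

```
package main

import (
	"fmt"

	"github.com/blevesearch/bleve/analysis"
	"github.com/blevesearch/bleve/analysis/lang/en"
)

func main() {
	// build the filter registered under the name "stemmer_en_snowball"
	filter := en.NewEnglishStemmerFilter()
	input := analysis.TokenStream{
		&analysis.Token{Term: []byte("running")},
		&analysis.Token{Term: []byte("jumping")},
	}
	// Filter stems each token term in place via the snowballstem Env
	for _, token := range filter.Filter(input) {
		fmt.Println(string(token.Term)) // run, jump
	}
}
```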
diff --git a/vendor/github.com/blevesearch/snowballstem/README.md b/vendor/github.com/blevesearch/snowballstem/README.md new file mode 100644 index 0000000000..bb4ff8ab96 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/README.md @@ -0,0 +1,66 @@ +# snowballstem + +This repository contains the Go stemmers generated by the [Snowball](https://github.com/snowballstem/snowball) project. They are maintained outside of the core bleve package so that they may be more easily reused in other contexts. + +## Usage + +All these stemmers export a single `Stem()` method which operates on a snowball `Env` structure. The `Env` structure maintains all state for the stemmer. A new `Env` is created to point at an initial string. After stemming, the results of the `Stem()` operation can be retrieved using the `Current()` method. The `Env` structure can be reused for subsequent calls by using the `SetCurrent()` method. + +## Example + +``` +package main + +import ( + "fmt" + + "github.com/blevesearch/snowballstem" + "github.com/blevesearch/snowballstem/english" +) + +func main() { + + // words to stem + words := []string{ + "running", + "jumping", + } + + // build new environment + env := snowballstem.NewEnv("") + + for _, word := range words { + // set up environment for word + env.SetCurrent(word) + // invoke stemmer + english.Stem(env) + // print results + fmt.Printf("%s stemmed to %s\n", word, env.Current()) + } +} +``` +Produces Output: +``` +$ ./snowtest +running stemmed to run +jumping stemmed to jump +``` + +## Testing + +The test harness for these stemmers is hosted in the main [Snowball](https://github.com/snowballstem/snowball) repository. There are functional tests built around the separate [snowballstem-data](https://github.com/snowballstem/snowball-data) repository, and there is support for fuzz-testing the stemmers there as well. + +## Generating the Stemmers + +``` +$ export SNOWBALL=/path/to/github.com/snowballstem/snowball/after/snowball/built +$ go generate +``` + +## Updating the Go Generate Commands + +A simple tool is provided to automate these from the snowball algorithms directory: + +``` +$ go run gengen.go /path/to/github.com/snowballstem/snowball/algorithms +``` diff --git a/vendor/github.com/blevesearch/snowballstem/among.go b/vendor/github.com/blevesearch/snowballstem/among.go new file mode 100644 index 0000000000..1a0c70257f --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/among.go @@ -0,0 +1,16 @@ +package snowballstem + +import "fmt" + +type AmongF func(env *Env, ctx interface{}) bool + +type Among struct { + Str string + A int32 + B int32 + F AmongF +} + +func (a *Among) String() string { + return fmt.Sprintf("str: `%s`, a: %d, b: %d, f: %p", a.Str, a.A, a.B, a.F) +} diff --git a/vendor/github.com/blevesearch/snowballstem/english/english_stemmer.go b/vendor/github.com/blevesearch/snowballstem/english/english_stemmer.go new file mode 100644 index 0000000000..87e1d48e7d --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/english/english_stemmer.go @@ -0,0 +1,1341 @@ +//! This file was generated automatically by the Snowball to Go compiler +//!
http://snowballstem.org/ + +package english + +import ( + snowballRuntime "github.com/blevesearch/snowballstem" +) + +var A_0 = []*snowballRuntime.Among{ + {Str: "arsen", A: -1, B: -1, F: nil}, + {Str: "commun", A: -1, B: -1, F: nil}, + {Str: "gener", A: -1, B: -1, F: nil}, +} + +var A_1 = []*snowballRuntime.Among{ + {Str: "'", A: -1, B: 1, F: nil}, + {Str: "'s'", A: 0, B: 1, F: nil}, + {Str: "'s", A: -1, B: 1, F: nil}, +} + +var A_2 = []*snowballRuntime.Among{ + {Str: "ied", A: -1, B: 2, F: nil}, + {Str: "s", A: -1, B: 3, F: nil}, + {Str: "ies", A: 1, B: 2, F: nil}, + {Str: "sses", A: 1, B: 1, F: nil}, + {Str: "ss", A: 1, B: -1, F: nil}, + {Str: "us", A: 1, B: -1, F: nil}, +} + +var A_3 = []*snowballRuntime.Among{ + {Str: "", A: -1, B: 3, F: nil}, + {Str: "bb", A: 0, B: 2, F: nil}, + {Str: "dd", A: 0, B: 2, F: nil}, + {Str: "ff", A: 0, B: 2, F: nil}, + {Str: "gg", A: 0, B: 2, F: nil}, + {Str: "bl", A: 0, B: 1, F: nil}, + {Str: "mm", A: 0, B: 2, F: nil}, + {Str: "nn", A: 0, B: 2, F: nil}, + {Str: "pp", A: 0, B: 2, F: nil}, + {Str: "rr", A: 0, B: 2, F: nil}, + {Str: "at", A: 0, B: 1, F: nil}, + {Str: "tt", A: 0, B: 2, F: nil}, + {Str: "iz", A: 0, B: 1, F: nil}, +} + +var A_4 = []*snowballRuntime.Among{ + {Str: "ed", A: -1, B: 2, F: nil}, + {Str: "eed", A: 0, B: 1, F: nil}, + {Str: "ing", A: -1, B: 2, F: nil}, + {Str: "edly", A: -1, B: 2, F: nil}, + {Str: "eedly", A: 3, B: 1, F: nil}, + {Str: "ingly", A: -1, B: 2, F: nil}, +} + +var A_5 = []*snowballRuntime.Among{ + {Str: "anci", A: -1, B: 3, F: nil}, + {Str: "enci", A: -1, B: 2, F: nil}, + {Str: "ogi", A: -1, B: 13, F: nil}, + {Str: "li", A: -1, B: 16, F: nil}, + {Str: "bli", A: 3, B: 12, F: nil}, + {Str: "abli", A: 4, B: 4, F: nil}, + {Str: "alli", A: 3, B: 8, F: nil}, + {Str: "fulli", A: 3, B: 14, F: nil}, + {Str: "lessli", A: 3, B: 15, F: nil}, + {Str: "ousli", A: 3, B: 10, F: nil}, + {Str: "entli", A: 3, B: 5, F: nil}, + {Str: "aliti", A: -1, B: 8, F: nil}, + {Str: "biliti", A: -1, B: 12, F: nil}, + {Str: "iviti", A: -1, B: 11, F: nil}, + {Str: "tional", A: -1, B: 1, F: nil}, + {Str: "ational", A: 14, B: 7, F: nil}, + {Str: "alism", A: -1, B: 8, F: nil}, + {Str: "ation", A: -1, B: 7, F: nil}, + {Str: "ization", A: 17, B: 6, F: nil}, + {Str: "izer", A: -1, B: 6, F: nil}, + {Str: "ator", A: -1, B: 7, F: nil}, + {Str: "iveness", A: -1, B: 11, F: nil}, + {Str: "fulness", A: -1, B: 9, F: nil}, + {Str: "ousness", A: -1, B: 10, F: nil}, +} + +var A_6 = []*snowballRuntime.Among{ + {Str: "icate", A: -1, B: 4, F: nil}, + {Str: "ative", A: -1, B: 6, F: nil}, + {Str: "alize", A: -1, B: 3, F: nil}, + {Str: "iciti", A: -1, B: 4, F: nil}, + {Str: "ical", A: -1, B: 4, F: nil}, + {Str: "tional", A: -1, B: 1, F: nil}, + {Str: "ational", A: 5, B: 2, F: nil}, + {Str: "ful", A: -1, B: 5, F: nil}, + {Str: "ness", A: -1, B: 5, F: nil}, +} + +var A_7 = []*snowballRuntime.Among{ + {Str: "ic", A: -1, B: 1, F: nil}, + {Str: "ance", A: -1, B: 1, F: nil}, + {Str: "ence", A: -1, B: 1, F: nil}, + {Str: "able", A: -1, B: 1, F: nil}, + {Str: "ible", A: -1, B: 1, F: nil}, + {Str: "ate", A: -1, B: 1, F: nil}, + {Str: "ive", A: -1, B: 1, F: nil}, + {Str: "ize", A: -1, B: 1, F: nil}, + {Str: "iti", A: -1, B: 1, F: nil}, + {Str: "al", A: -1, B: 1, F: nil}, + {Str: "ism", A: -1, B: 1, F: nil}, + {Str: "ion", A: -1, B: 2, F: nil}, + {Str: "er", A: -1, B: 1, F: nil}, + {Str: "ous", A: -1, B: 1, F: nil}, + {Str: "ant", A: -1, B: 1, F: nil}, + {Str: "ent", A: -1, B: 1, F: nil}, + {Str: "ment", A: 15, B: 1, F: nil}, + {Str: "ement", A: 16, B: 1, F: nil}, +} + +var A_8 = 
[]*snowballRuntime.Among{ + {Str: "e", A: -1, B: 1, F: nil}, + {Str: "l", A: -1, B: 2, F: nil}, +} + +var A_9 = []*snowballRuntime.Among{ + {Str: "succeed", A: -1, B: -1, F: nil}, + {Str: "proceed", A: -1, B: -1, F: nil}, + {Str: "exceed", A: -1, B: -1, F: nil}, + {Str: "canning", A: -1, B: -1, F: nil}, + {Str: "inning", A: -1, B: -1, F: nil}, + {Str: "earring", A: -1, B: -1, F: nil}, + {Str: "herring", A: -1, B: -1, F: nil}, + {Str: "outing", A: -1, B: -1, F: nil}, +} + +var A_10 = []*snowballRuntime.Among{ + {Str: "andes", A: -1, B: -1, F: nil}, + {Str: "atlas", A: -1, B: -1, F: nil}, + {Str: "bias", A: -1, B: -1, F: nil}, + {Str: "cosmos", A: -1, B: -1, F: nil}, + {Str: "dying", A: -1, B: 3, F: nil}, + {Str: "early", A: -1, B: 9, F: nil}, + {Str: "gently", A: -1, B: 7, F: nil}, + {Str: "howe", A: -1, B: -1, F: nil}, + {Str: "idly", A: -1, B: 6, F: nil}, + {Str: "lying", A: -1, B: 4, F: nil}, + {Str: "news", A: -1, B: -1, F: nil}, + {Str: "only", A: -1, B: 10, F: nil}, + {Str: "singly", A: -1, B: 11, F: nil}, + {Str: "skies", A: -1, B: 2, F: nil}, + {Str: "skis", A: -1, B: 1, F: nil}, + {Str: "sky", A: -1, B: -1, F: nil}, + {Str: "tying", A: -1, B: 5, F: nil}, + {Str: "ugly", A: -1, B: 8, F: nil}, +} + +var G_v = []byte{17, 65, 16, 1} + +var G_v_WXY = []byte{1, 17, 65, 208, 1} + +var G_valid_LI = []byte{55, 141, 2} + +type Context struct { + b_Y_found bool + i_p2 int + i_p1 int +} + +func r_prelude(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 25 + // unset Y_found, line 26 + context.b_Y_found = false + // do, line 27 + var v_1 = env.Cursor +lab0: + for { + // (, line 27 + // [, line 27 + env.Bra = env.Cursor + // literal, line 27 + if !env.EqS("'") { + break lab0 + } + // ], line 27 + env.Ket = env.Cursor + // delete, line 27 + if !env.SliceDel() { + return false + } + break lab0 + } + env.Cursor = v_1 + // do, line 28 + var v_2 = env.Cursor +lab1: + for { + // (, line 28 + // [, line 28 + env.Bra = env.Cursor + // literal, line 28 + if !env.EqS("y") { + break lab1 + } + // ], line 28 + env.Ket = env.Cursor + // <-, line 28 + if !env.SliceFrom("Y") { + return false + } + // set Y_found, line 28 + context.b_Y_found = true + break lab1 + } + env.Cursor = v_2 + // do, line 29 + var v_3 = env.Cursor +lab2: + for { + // repeat, line 29 + replab3: + for { + var v_4 = env.Cursor + lab4: + for range [2]struct{}{} { + // (, line 29 + // goto, line 29 + golab5: + for { + var v_5 = env.Cursor + lab6: + for { + // (, line 29 + if !env.InGrouping(G_v, 97, 121) { + break lab6 + } + // [, line 29 + env.Bra = env.Cursor + // literal, line 29 + if !env.EqS("y") { + break lab6 + } + // ], line 29 + env.Ket = env.Cursor + env.Cursor = v_5 + break golab5 + } + env.Cursor = v_5 + if env.Cursor >= env.Limit { + break lab4 + } + env.NextChar() + } + // <-, line 29 + if !env.SliceFrom("Y") { + return false + } + // set Y_found, line 29 + context.b_Y_found = true + continue replab3 + } + env.Cursor = v_4 + break replab3 + } + break lab2 + } + env.Cursor = v_3 + return true +} + +func r_mark_regions(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 32 + context.i_p1 = env.Limit + context.i_p2 = env.Limit + // do, line 35 + var v_1 = env.Cursor +lab0: + for { + // (, line 35 + // or, line 41 + lab1: + for { + var v_2 = env.Cursor + lab2: + for { + // among, line 36 + if env.FindAmong(A_0, context) == 0 { + break lab2 + } + break lab1 + } + env.Cursor = v_2 + // (, line 41 + // gopast, line 41 + 
golab3: + for { + lab4: + for { + if !env.InGrouping(G_v, 97, 121) { + break lab4 + } + break golab3 + } + if env.Cursor >= env.Limit { + break lab0 + } + env.NextChar() + } + // gopast, line 41 + golab5: + for { + lab6: + for { + if !env.OutGrouping(G_v, 97, 121) { + break lab6 + } + break golab5 + } + if env.Cursor >= env.Limit { + break lab0 + } + env.NextChar() + } + break lab1 + } + // setmark p1, line 42 + context.i_p1 = env.Cursor + // gopast, line 43 + golab7: + for { + lab8: + for { + if !env.InGrouping(G_v, 97, 121) { + break lab8 + } + break golab7 + } + if env.Cursor >= env.Limit { + break lab0 + } + env.NextChar() + } + // gopast, line 43 + golab9: + for { + lab10: + for { + if !env.OutGrouping(G_v, 97, 121) { + break lab10 + } + break golab9 + } + if env.Cursor >= env.Limit { + break lab0 + } + env.NextChar() + } + // setmark p2, line 43 + context.i_p2 = env.Cursor + break lab0 + } + env.Cursor = v_1 + return true +} + +func r_shortv(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 49 + // or, line 51 +lab0: + for { + var v_1 = env.Limit - env.Cursor + lab1: + for { + // (, line 50 + if !env.OutGroupingB(G_v_WXY, 89, 121) { + break lab1 + } + if !env.InGroupingB(G_v, 97, 121) { + break lab1 + } + if !env.OutGroupingB(G_v, 97, 121) { + break lab1 + } + break lab0 + } + env.Cursor = env.Limit - v_1 + // (, line 52 + if !env.OutGroupingB(G_v, 97, 121) { + return false + } + if !env.InGroupingB(G_v, 97, 121) { + return false + } + // atlimit, line 52 + if env.Cursor > env.LimitBackward { + return false + } + break lab0 + } + return true +} + +func r_R1(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + if !(context.i_p1 <= env.Cursor) { + return false + } + return true +} + +func r_R2(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + if !(context.i_p2 <= env.Cursor) { + return false + } + return true +} + +func r_Step_1a(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 58 + // try, line 59 + var v_1 = env.Limit - env.Cursor +lab0: + for { + // (, line 59 + // [, line 60 + env.Ket = env.Cursor + // substring, line 60 + among_var = env.FindAmongB(A_1, context) + if among_var == 0 { + env.Cursor = env.Limit - v_1 + break lab0 + } + // ], line 60 + env.Bra = env.Cursor + if among_var == 0 { + env.Cursor = env.Limit - v_1 + break lab0 + } else if among_var == 1 { + // (, line 62 + // delete, line 62 + if !env.SliceDel() { + return false + } + } + break lab0 + } + // [, line 65 + env.Ket = env.Cursor + // substring, line 65 + among_var = env.FindAmongB(A_2, context) + if among_var == 0 { + return false + } + // ], line 65 + env.Bra = env.Cursor + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 66 + // <-, line 66 + if !env.SliceFrom("ss") { + return false + } + } else if among_var == 2 { + // (, line 68 + // or, line 68 + lab1: + for { + var v_2 = env.Limit - env.Cursor + lab2: + for { + // (, line 68 + { + // hop, line 68 + var c = env.ByteIndexForHop(-(2)) + if int32(env.LimitBackward) > c || c > int32(env.Limit) { + break lab2 + } + env.Cursor = int(c) + } + // <-, line 68 + if !env.SliceFrom("i") { + return false + } + break lab1 + } + env.Cursor = env.Limit - v_2 + // <-, line 68 + if !env.SliceFrom("ie") { + return false + } + break lab1 + } + } else if among_var == 3 { + // (, line 69 + // next, line 69 + if env.Cursor <= 
env.LimitBackward { + return false + } + env.PrevChar() + // gopast, line 69 + golab3: + for { + lab4: + for { + if !env.InGroupingB(G_v, 97, 121) { + break lab4 + } + break golab3 + } + if env.Cursor <= env.LimitBackward { + return false + } + env.PrevChar() + } + // delete, line 69 + if !env.SliceDel() { + return false + } + } + return true +} + +func r_Step_1b(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 74 + // [, line 75 + env.Ket = env.Cursor + // substring, line 75 + among_var = env.FindAmongB(A_4, context) + if among_var == 0 { + return false + } + // ], line 75 + env.Bra = env.Cursor + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 77 + // call R1, line 77 + if !r_R1(env, context) { + return false + } + // <-, line 77 + if !env.SliceFrom("ee") { + return false + } + } else if among_var == 2 { + // (, line 79 + // test, line 80 + var v_1 = env.Limit - env.Cursor + // gopast, line 80 + golab0: + for { + lab1: + for { + if !env.InGroupingB(G_v, 97, 121) { + break lab1 + } + break golab0 + } + if env.Cursor <= env.LimitBackward { + return false + } + env.PrevChar() + } + env.Cursor = env.Limit - v_1 + // delete, line 80 + if !env.SliceDel() { + return false + } + // test, line 81 + var v_3 = env.Limit - env.Cursor + // substring, line 81 + among_var = env.FindAmongB(A_3, context) + if among_var == 0 { + return false + } + env.Cursor = env.Limit - v_3 + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 83 + { + // <+, line 83 + var c = env.Cursor + bra, ket := env.Cursor, env.Cursor + env.Insert(bra, ket, "e") + env.Cursor = c + } + } else if among_var == 2 { + // (, line 86 + // [, line 86 + env.Ket = env.Cursor + // next, line 86 + if env.Cursor <= env.LimitBackward { + return false + } + env.PrevChar() + // ], line 86 + env.Bra = env.Cursor + // delete, line 86 + if !env.SliceDel() { + return false + } + } else if among_var == 3 { + // (, line 87 + // atmark, line 87 + if env.Cursor != context.i_p1 { + return false + } + // test, line 87 + var v_4 = env.Limit - env.Cursor + // call shortv, line 87 + if !r_shortv(env, context) { + return false + } + env.Cursor = env.Limit - v_4 + { + // <+, line 87 + var c = env.Cursor + bra, ket := env.Cursor, env.Cursor + env.Insert(bra, ket, "e") + env.Cursor = c + } + } + } + return true +} + +func r_Step_1c(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 93 + // [, line 94 + env.Ket = env.Cursor + // or, line 94 +lab0: + for { + var v_1 = env.Limit - env.Cursor + lab1: + for { + // literal, line 94 + if !env.EqSB("y") { + break lab1 + } + break lab0 + } + env.Cursor = env.Limit - v_1 + // literal, line 94 + if !env.EqSB("Y") { + return false + } + break lab0 + } + // ], line 94 + env.Bra = env.Cursor + if !env.OutGroupingB(G_v, 97, 121) { + return false + } + // not, line 95 + var v_2 = env.Limit - env.Cursor +lab2: + for { + // atlimit, line 95 + if env.Cursor > env.LimitBackward { + break lab2 + } + return false + } + env.Cursor = env.Limit - v_2 + // <-, line 96 + if !env.SliceFrom("i") { + return false + } + return true +} + +func r_Step_2(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 99 + // [, line 100 + env.Ket = env.Cursor + // substring, line 100 + among_var = env.FindAmongB(A_5, context) + if among_var == 0 { + return false + } + // ], line 100 + env.Bra = 
env.Cursor + // call R1, line 100 + if !r_R1(env, context) { + return false + } + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 101 + // <-, line 101 + if !env.SliceFrom("tion") { + return false + } + } else if among_var == 2 { + // (, line 102 + // <-, line 102 + if !env.SliceFrom("ence") { + return false + } + } else if among_var == 3 { + // (, line 103 + // <-, line 103 + if !env.SliceFrom("ance") { + return false + } + } else if among_var == 4 { + // (, line 104 + // <-, line 104 + if !env.SliceFrom("able") { + return false + } + } else if among_var == 5 { + // (, line 105 + // <-, line 105 + if !env.SliceFrom("ent") { + return false + } + } else if among_var == 6 { + // (, line 107 + // <-, line 107 + if !env.SliceFrom("ize") { + return false + } + } else if among_var == 7 { + // (, line 109 + // <-, line 109 + if !env.SliceFrom("ate") { + return false + } + } else if among_var == 8 { + // (, line 111 + // <-, line 111 + if !env.SliceFrom("al") { + return false + } + } else if among_var == 9 { + // (, line 112 + // <-, line 112 + if !env.SliceFrom("ful") { + return false + } + } else if among_var == 10 { + // (, line 114 + // <-, line 114 + if !env.SliceFrom("ous") { + return false + } + } else if among_var == 11 { + // (, line 116 + // <-, line 116 + if !env.SliceFrom("ive") { + return false + } + } else if among_var == 12 { + // (, line 118 + // <-, line 118 + if !env.SliceFrom("ble") { + return false + } + } else if among_var == 13 { + // (, line 119 + // literal, line 119 + if !env.EqSB("l") { + return false + } + // <-, line 119 + if !env.SliceFrom("og") { + return false + } + } else if among_var == 14 { + // (, line 120 + // <-, line 120 + if !env.SliceFrom("ful") { + return false + } + } else if among_var == 15 { + // (, line 121 + // <-, line 121 + if !env.SliceFrom("less") { + return false + } + } else if among_var == 16 { + // (, line 122 + if !env.InGroupingB(G_valid_LI, 99, 116) { + return false + } + // delete, line 122 + if !env.SliceDel() { + return false + } + } + return true +} + +func r_Step_3(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 126 + // [, line 127 + env.Ket = env.Cursor + // substring, line 127 + among_var = env.FindAmongB(A_6, context) + if among_var == 0 { + return false + } + // ], line 127 + env.Bra = env.Cursor + // call R1, line 127 + if !r_R1(env, context) { + return false + } + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 128 + // <-, line 128 + if !env.SliceFrom("tion") { + return false + } + } else if among_var == 2 { + // (, line 129 + // <-, line 129 + if !env.SliceFrom("ate") { + return false + } + } else if among_var == 3 { + // (, line 130 + // <-, line 130 + if !env.SliceFrom("al") { + return false + } + } else if among_var == 4 { + // (, line 132 + // <-, line 132 + if !env.SliceFrom("ic") { + return false + } + } else if among_var == 5 { + // (, line 134 + // delete, line 134 + if !env.SliceDel() { + return false + } + } else if among_var == 6 { + // (, line 136 + // call R2, line 136 + if !r_R2(env, context) { + return false + } + // delete, line 136 + if !env.SliceDel() { + return false + } + } + return true +} + +func r_Step_4(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 140 + // [, line 141 + env.Ket = env.Cursor + // substring, line 141 + among_var = env.FindAmongB(A_7, context) + if among_var == 0 { + return 
false + } + // ], line 141 + env.Bra = env.Cursor + // call R2, line 141 + if !r_R2(env, context) { + return false + } + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 144 + // delete, line 144 + if !env.SliceDel() { + return false + } + } else if among_var == 2 { + // (, line 145 + // or, line 145 + lab0: + for { + var v_1 = env.Limit - env.Cursor + lab1: + for { + // literal, line 145 + if !env.EqSB("s") { + break lab1 + } + break lab0 + } + env.Cursor = env.Limit - v_1 + // literal, line 145 + if !env.EqSB("t") { + return false + } + break lab0 + } + // delete, line 145 + if !env.SliceDel() { + return false + } + } + return true +} + +func r_Step_5(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 149 + // [, line 150 + env.Ket = env.Cursor + // substring, line 150 + among_var = env.FindAmongB(A_8, context) + if among_var == 0 { + return false + } + // ], line 150 + env.Bra = env.Cursor + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 151 + // or, line 151 + lab0: + for { + var v_1 = env.Limit - env.Cursor + lab1: + for { + // call R2, line 151 + if !r_R2(env, context) { + break lab1 + } + break lab0 + } + env.Cursor = env.Limit - v_1 + // (, line 151 + // call R1, line 151 + if !r_R1(env, context) { + return false + } + // not, line 151 + var v_2 = env.Limit - env.Cursor + lab2: + for { + // call shortv, line 151 + if !r_shortv(env, context) { + break lab2 + } + return false + } + env.Cursor = env.Limit - v_2 + break lab0 + } + // delete, line 151 + if !env.SliceDel() { + return false + } + } else if among_var == 2 { + // (, line 152 + // call R2, line 152 + if !r_R2(env, context) { + return false + } + // literal, line 152 + if !env.EqSB("l") { + return false + } + // delete, line 152 + if !env.SliceDel() { + return false + } + } + return true +} + +func r_exception2(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 156 + // [, line 158 + env.Ket = env.Cursor + // substring, line 158 + if env.FindAmongB(A_9, context) == 0 { + return false + } + // ], line 158 + env.Bra = env.Cursor + // atlimit, line 158 + if env.Cursor > env.LimitBackward { + return false + } + return true +} + +func r_exception1(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + var among_var int32 + // (, line 168 + // [, line 170 + env.Bra = env.Cursor + // substring, line 170 + among_var = env.FindAmong(A_10, context) + if among_var == 0 { + return false + } + // ], line 170 + env.Ket = env.Cursor + // atlimit, line 170 + if env.Cursor < env.Limit { + return false + } + if among_var == 0 { + return false + } else if among_var == 1 { + // (, line 174 + // <-, line 174 + if !env.SliceFrom("ski") { + return false + } + } else if among_var == 2 { + // (, line 175 + // <-, line 175 + if !env.SliceFrom("sky") { + return false + } + } else if among_var == 3 { + // (, line 176 + // <-, line 176 + if !env.SliceFrom("die") { + return false + } + } else if among_var == 4 { + // (, line 177 + // <-, line 177 + if !env.SliceFrom("lie") { + return false + } + } else if among_var == 5 { + // (, line 178 + // <-, line 178 + if !env.SliceFrom("tie") { + return false + } + } else if among_var == 6 { + // (, line 182 + // <-, line 182 + if !env.SliceFrom("idl") { + return false + } + } else if among_var == 7 { + // (, line 183 + // <-, line 183 + if !env.SliceFrom("gentl") { + return false + } + } 
else if among_var == 8 { + // (, line 184 + // <-, line 184 + if !env.SliceFrom("ugli") { + return false + } + } else if among_var == 9 { + // (, line 185 + // <-, line 185 + if !env.SliceFrom("earli") { + return false + } + } else if among_var == 10 { + // (, line 186 + // <-, line 186 + if !env.SliceFrom("onli") { + return false + } + } else if among_var == 11 { + // (, line 187 + // <-, line 187 + if !env.SliceFrom("singl") { + return false + } + } + return true +} + +func r_postlude(env *snowballRuntime.Env, ctx interface{}) bool { + context := ctx.(*Context) + _ = context + // (, line 203 + // Boolean test Y_found, line 203 + if !context.b_Y_found { + return false + } + // repeat, line 203 +replab0: + for { + var v_1 = env.Cursor + lab1: + for range [2]struct{}{} { + // (, line 203 + // goto, line 203 + golab2: + for { + var v_2 = env.Cursor + lab3: + for { + // (, line 203 + // [, line 203 + env.Bra = env.Cursor + // literal, line 203 + if !env.EqS("Y") { + break lab3 + } + // ], line 203 + env.Ket = env.Cursor + env.Cursor = v_2 + break golab2 + } + env.Cursor = v_2 + if env.Cursor >= env.Limit { + break lab1 + } + env.NextChar() + } + // <-, line 203 + if !env.SliceFrom("y") { + return false + } + continue replab0 + } + env.Cursor = v_1 + break replab0 + } + return true +} + +func Stem(env *snowballRuntime.Env) bool { + var context = &Context{ + b_Y_found: false, + i_p2: 0, + i_p1: 0, + } + _ = context + // (, line 205 + // or, line 207 +lab0: + for { + var v_1 = env.Cursor + lab1: + for { + // call exception1, line 207 + if !r_exception1(env, context) { + break lab1 + } + break lab0 + } + env.Cursor = v_1 + lab2: + for { + // not, line 208 + var v_2 = env.Cursor + lab3: + for { + { + // hop, line 208 + var c = env.ByteIndexForHop((3)) + if int32(0) > c || c > int32(env.Limit) { + break lab3 + } + env.Cursor = int(c) + } + break lab2 + } + env.Cursor = v_2 + break lab0 + } + env.Cursor = v_1 + // (, line 208 + // do, line 209 + var v_3 = env.Cursor + lab4: + for { + // call prelude, line 209 + if !r_prelude(env, context) { + break lab4 + } + break lab4 + } + env.Cursor = v_3 + // do, line 210 + var v_4 = env.Cursor + lab5: + for { + // call mark_regions, line 210 + if !r_mark_regions(env, context) { + break lab5 + } + break lab5 + } + env.Cursor = v_4 + // backwards, line 211 + env.LimitBackward = env.Cursor + env.Cursor = env.Limit + // (, line 211 + // do, line 213 + var v_5 = env.Limit - env.Cursor + lab6: + for { + // call Step_1a, line 213 + if !r_Step_1a(env, context) { + break lab6 + } + break lab6 + } + env.Cursor = env.Limit - v_5 + // or, line 215 + lab7: + for { + var v_6 = env.Limit - env.Cursor + lab8: + for { + // call exception2, line 215 + if !r_exception2(env, context) { + break lab8 + } + break lab7 + } + env.Cursor = env.Limit - v_6 + // (, line 215 + // do, line 217 + var v_7 = env.Limit - env.Cursor + lab9: + for { + // call Step_1b, line 217 + if !r_Step_1b(env, context) { + break lab9 + } + break lab9 + } + env.Cursor = env.Limit - v_7 + // do, line 218 + var v_8 = env.Limit - env.Cursor + lab10: + for { + // call Step_1c, line 218 + if !r_Step_1c(env, context) { + break lab10 + } + break lab10 + } + env.Cursor = env.Limit - v_8 + // do, line 220 + var v_9 = env.Limit - env.Cursor + lab11: + for { + // call Step_2, line 220 + if !r_Step_2(env, context) { + break lab11 + } + break lab11 + } + env.Cursor = env.Limit - v_9 + // do, line 221 + var v_10 = env.Limit - env.Cursor + lab12: + for { + // call Step_3, line 221 + if !r_Step_3(env, context) { + break 
lab12 + } + break lab12 + } + env.Cursor = env.Limit - v_10 + // do, line 222 + var v_11 = env.Limit - env.Cursor + lab13: + for { + // call Step_4, line 222 + if !r_Step_4(env, context) { + break lab13 + } + break lab13 + } + env.Cursor = env.Limit - v_11 + // do, line 224 + var v_12 = env.Limit - env.Cursor + lab14: + for { + // call Step_5, line 224 + if !r_Step_5(env, context) { + break lab14 + } + break lab14 + } + env.Cursor = env.Limit - v_12 + break lab7 + } + env.Cursor = env.LimitBackward + // do, line 227 + var v_13 = env.Cursor + lab15: + for { + // call postlude, line 227 + if !r_postlude(env, context) { + break lab15 + } + break lab15 + } + env.Cursor = v_13 + break lab0 + } + return true +} diff --git a/vendor/github.com/blevesearch/snowballstem/env.go b/vendor/github.com/blevesearch/snowballstem/env.go new file mode 100644 index 0000000000..6636994ac7 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/env.go @@ -0,0 +1,389 @@ +package snowballstem + +import ( + "log" + "strings" + "unicode/utf8" +) + +// Env represents the Snowball execution environment +type Env struct { + current string + Cursor int + Limit int + LimitBackward int + Bra int + Ket int +} + +// NewEnv creates a new Snowball execution environment on the provided string +func NewEnv(val string) *Env { + return &Env{ + current: val, + Cursor: 0, + Limit: len(val), + LimitBackward: 0, + Bra: 0, + Ket: len(val), + } +} + +func (env *Env) Current() string { + return env.current +} + +func (env *Env) SetCurrent(s string) { + env.current = s + env.Cursor = 0 + env.Limit = len(s) + env.LimitBackward = 0 + env.Bra = 0 + env.Ket = len(s) +} + +func (env *Env) ReplaceS(bra, ket int, s string) int32 { + adjustment := int32(len(s)) - (int32(ket) - int32(bra)) + result, _ := splitAt(env.current, bra) + rsplit := ket + if ket < bra { + rsplit = bra + } + _, rhs := splitAt(env.current, rsplit) + result += s + result += rhs + + newLim := int32(env.Limit) + adjustment + env.Limit = int(newLim) + + if env.Cursor >= ket { + newCur := int32(env.Cursor) + adjustment + env.Cursor = int(newCur) + } else if env.Cursor > bra { + env.Cursor = bra + } + + env.current = result + return adjustment +} + +func (env *Env) EqS(s string) bool { + if env.Cursor >= env.Limit { + return false + } + + if strings.HasPrefix(env.current[env.Cursor:], s) { + env.Cursor += len(s) + for !onCharBoundary(env.current, env.Cursor) { + env.Cursor++ + } + return true + } + return false +} + +func (env *Env) EqSB(s string) bool { + if int32(env.Cursor)-int32(env.LimitBackward) < int32(len(s)) { + return false + } else if !onCharBoundary(env.current, env.Cursor-len(s)) || + !strings.HasPrefix(env.current[env.Cursor-len(s):], s) { + return false + } else { + env.Cursor -= len(s) + return true + } +} + +func (env *Env) SliceFrom(s string) bool { + bra, ket := env.Bra, env.Ket + env.ReplaceS(bra, ket, s) + return true +} + +func (env *Env) NextChar() { + env.Cursor++ + for !onCharBoundary(env.current, env.Cursor) { + env.Cursor++ + } +} + +func (env *Env) PrevChar() { + env.Cursor-- + for !onCharBoundary(env.current, env.Cursor) { + env.Cursor-- + } +} + +func (env *Env) ByteIndexForHop(delta int32) int32 { + if delta > 0 { + res := env.Cursor + for delta > 0 { + res++ + delta-- + for res <= len(env.current) && !onCharBoundary(env.current, res) { + res++ + } + } + return int32(res) + } else if delta < 0 { + res := env.Cursor + for delta < 0 { + res-- + delta++ + for res >= 0 && !onCharBoundary(env.current, res) { + res-- + } + } + return int32(res) + 
} else { + return int32(env.Cursor) + } +} + +func (env *Env) InGrouping(chars []byte, min, max int32) bool { + if env.Cursor >= env.Limit { + return false + } + + r, _ := utf8.DecodeRuneInString(env.current[env.Cursor:]) + if r != utf8.RuneError { + if r > max || r < min { + return false + } + r -= min + if (chars[uint(r>>3)] & (0x1 << uint(r&0x7))) == 0 { + return false + } + env.NextChar() + return true + } + return false +} + +func (env *Env) InGroupingB(chars []byte, min, max int32) bool { + if env.Cursor <= env.LimitBackward { + return false + } + env.PrevChar() + r, _ := utf8.DecodeRuneInString(env.current[env.Cursor:]) + if r != utf8.RuneError { + env.NextChar() + if r > max || r < min { + return false + } + r -= min + if (chars[uint(r>>3)] & (0x1 << uint(r&0x7))) == 0 { + return false + } + env.PrevChar() + return true + } + return false +} + +func (env *Env) OutGrouping(chars []byte, min, max int32) bool { + if env.Cursor >= env.Limit { + return false + } + r, _ := utf8.DecodeRuneInString(env.current[env.Cursor:]) + if r != utf8.RuneError { + if r > max || r < min { + env.NextChar() + return true + } + r -= min + if (chars[uint(r>>3)] & (0x1 << uint(r&0x7))) == 0 { + env.NextChar() + return true + } + } + return false +} + +func (env *Env) OutGroupingB(chars []byte, min, max int32) bool { + if env.Cursor <= env.LimitBackward { + return false + } + env.PrevChar() + r, _ := utf8.DecodeRuneInString(env.current[env.Cursor:]) + if r != utf8.RuneError { + env.NextChar() + if r > max || r < min { + env.PrevChar() + return true + } + r -= min + if (chars[uint(r>>3)] & (0x1 << uint(r&0x7))) == 0 { + env.PrevChar() + return true + } + } + return false +} + +func (env *Env) SliceDel() bool { + return env.SliceFrom("") +} + +func (env *Env) Insert(bra, ket int, s string) { + adjustment := env.ReplaceS(bra, ket, s) + if bra <= env.Bra { + env.Bra = int(int32(env.Bra) + adjustment) + } + if bra <= env.Ket { + env.Ket = int(int32(env.Ket) + adjustment) + } +} + +func (env *Env) SliceTo() string { + return env.current[env.Bra:env.Ket] +} + +func (env *Env) FindAmong(amongs []*Among, ctx interface{}) int32 { + var i int32 + j := int32(len(amongs)) + + c := env.Cursor + l := env.Limit + + var commonI, commonJ int + + firstKeyInspected := false + for { + k := i + ((j - i) >> 1) + var diff int32 + common := min(commonI, commonJ) + w := amongs[k] + for lvar := common; lvar < len(w.Str); lvar++ { + if c+common == l { + diff-- + break + } + diff = int32(env.current[c+common]) - int32(w.Str[lvar]) + if diff != 0 { + break + } + common++ + } + if diff < 0 { + j = k + commonJ = common + } else { + i = k + commonI = common + } + if j-i <= 1 { + if i > 0 { + break + } + if j == i { + break + } + if firstKeyInspected { + break + } + firstKeyInspected = true + } + } + + for { + w := amongs[i] + if commonI >= len(w.Str) { + env.Cursor = c + len(w.Str) + if w.F != nil { + res := w.F(env, ctx) + env.Cursor = c + len(w.Str) + if res { + return w.B + } + } else { + return w.B + } + } + i = w.A + if i < 0 { + return 0 + } + } +} + +func (env *Env) FindAmongB(amongs []*Among, ctx interface{}) int32 { + var i int32 + j := int32(len(amongs)) + + c := env.Cursor + lb := env.LimitBackward + + var commonI, commonJ int + + firstKeyInspected := false + + for { + k := i + ((j - i) >> 1) + diff := int32(0) + common := min(commonI, commonJ) + w := amongs[k] + for lvar := len(w.Str) - int(common) - 1; lvar >= 0; lvar-- { + if c-common == lb { + diff-- + break + } + diff = int32(env.current[c-common-1]) - int32(w.Str[lvar]) + 
if diff != 0 { + break + } + // Count up the common prefix in bytes, not characters: advance by the byte width of the matched char + common++ + } + if diff < 0 { + j = k + commonJ = common + } else { + i = k + commonI = common + } + if j-i <= 1 { + if i > 0 { + break + } + if j == i { + break + } + if firstKeyInspected { + break + } + firstKeyInspected = true + } + } + for { + w := amongs[i] + if commonI >= len(w.Str) { + env.Cursor = c - len(w.Str) + if w.F != nil { + res := w.F(env, ctx) + env.Cursor = c - len(w.Str) + if res { + return w.B + } + } else { + return w.B + } + } + i = w.A + if i < 0 { + return 0 + } + } +} + +func (env *Env) Debug(count, lineNumber int) { + log.Printf("snowball debug, count: %d, line: %d", count, lineNumber) +} + +func (env *Env) Clone() *Env { + clone := *env + return &clone +} + +func (env *Env) AssignTo() string { + return env.Current() +} diff --git a/vendor/github.com/blevesearch/snowballstem/gen.go b/vendor/github.com/blevesearch/snowballstem/gen.go new file mode 100644 index 0000000000..92548b0010 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/gen.go @@ -0,0 +1,61 @@ +package snowballstem + +// to regenerate these commands, run +// go run gengen.go /path/to/snowball/algorithms/directory + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/arabic/stem_Unicode.sbl -go -o arabic/arabic_stemmer -gop arabic -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w arabic/arabic_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/danish/stem_ISO_8859_1.sbl -go -o danish/danish_stemmer -gop danish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w danish/danish_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/dutch/stem_ISO_8859_1.sbl -go -o dutch/dutch_stemmer -gop dutch -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w dutch/dutch_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/english/stem_ISO_8859_1.sbl -go -o english/english_stemmer -gop english -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w english/english_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/finnish/stem_ISO_8859_1.sbl -go -o finnish/finnish_stemmer -gop finnish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w finnish/finnish_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/french/stem_ISO_8859_1.sbl -go -o french/french_stemmer -gop french -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w french/french_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/german/stem_ISO_8859_1.sbl -go -o german/german_stemmer -gop german -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w german/german_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/hungarian/stem_Unicode.sbl -go -o hungarian/hungarian_stemmer -gop hungarian -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w hungarian/hungarian_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/irish/stem_ISO_8859_1.sbl -go -o irish/irish_stemmer -gop irish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w irish/irish_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/italian/stem_ISO_8859_1.sbl -go -o italian/italian_stemmer -gop italian -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w italian/italian_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/norwegian/stem_ISO_8859_1.sbl -go -o norwegian/norwegian_stemmer
-gop norwegian -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w norwegian/norwegian_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/porter/stem_ISO_8859_1.sbl -go -o porter/porter_stemmer -gop porter -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w porter/porter_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/portuguese/stem_ISO_8859_1.sbl -go -o portuguese/portuguese_stemmer -gop portuguese -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w portuguese/portuguese_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/romanian/stem_Unicode.sbl -go -o romanian/romanian_stemmer -gop romanian -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w romanian/romanian_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/russian/stem_Unicode.sbl -go -o russian/russian_stemmer -gop russian -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w russian/russian_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/spanish/stem_ISO_8859_1.sbl -go -o spanish/spanish_stemmer -gop spanish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w spanish/spanish_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/swedish/stem_ISO_8859_1.sbl -go -o swedish/swedish_stemmer -gop swedish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w swedish/swedish_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/tamil/stem_Unicode.sbl -go -o tamil/tamil_stemmer -gop tamil -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w tamil/tamil_stemmer.go + +//go:generate $SNOWBALL/snowball $SNOWBALL/algorithms/turkish/stem_Unicode.sbl -go -o turkish/turkish_stemmer -gop turkish -gor github.com/blevesearch/snowballstem +//go:generate gofmt -s -w turkish/turkish_stemmer.go diff --git a/vendor/github.com/blevesearch/snowballstem/go.mod b/vendor/github.com/blevesearch/snowballstem/go.mod new file mode 100644 index 0000000000..12218e2d67 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/go.mod @@ -0,0 +1,3 @@ +module github.com/blevesearch/snowballstem + +go 1.13 diff --git a/vendor/github.com/blevesearch/snowballstem/util.go b/vendor/github.com/blevesearch/snowballstem/util.go new file mode 100644 index 0000000000..7c68f6e750 --- /dev/null +++ b/vendor/github.com/blevesearch/snowballstem/util.go @@ -0,0 +1,34 @@ +package snowballstem + +import ( + "math" + "unicode/utf8" +) + +const MaxInt = math.MaxInt32 +const MinInt = math.MinInt32 + +func splitAt(str string, mid int) (string, string) { + return str[:mid], str[mid:] +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func onCharBoundary(s string, pos int) bool { + if pos <= 0 || pos >= len(s) { + return true + } + return utf8.RuneStart(s[pos]) +} + +// RuneCountInString is a wrapper around utf8.RuneCountInString +// this allows us to not have to conditionally include +// the utf8 package into some stemmers and not others +func RuneCountInString(str string) int { + return utf8.RuneCountInString(str) +} diff --git a/vendor/github.com/blevesearch/zap/v11/.gitignore b/vendor/github.com/blevesearch/zap/v11/.gitignore new file mode 100644 index 0000000000..46d1cfad54 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v11/.gitignore @@ -0,0 +1,12 @@ +#* +*.sublime-* +*~ +.#* +.project +.settings +**/.idea/ +**/*.iml +.DS_Store +/cmd/zap/zap +*.test +tags diff --git a/vendor/github.com/blevesearch/zap/v11/LICENSE 
b/vendor/github.com/blevesearch/zap/v11/LICENSE new file mode 100644 index 0000000000..7a4a3ea242 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v11/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/README.md b/vendor/github.com/blevesearch/zap/v11/README.md similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/README.md rename to vendor/github.com/blevesearch/zap/v11/README.md diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/build.go b/vendor/github.com/blevesearch/zap/v11/build.go similarity index 97% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/build.go rename to vendor/github.com/blevesearch/zap/v11/build.go index c02333cee0..bac1edb549 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/build.go +++ b/vendor/github.com/blevesearch/zap/v11/build.go @@ -16,9 +16,10 @@ package zap import ( "bufio" - "github.com/couchbase/vellum" "math" "os" + + "github.com/couchbase/vellum" ) const Version uint32 = 11 @@ -27,6 +28,10 @@ const Type string = "zap" const fieldNotUninverted = math.MaxUint64 +func (sb *SegmentBase) Persist(path string) error { + return PersistSegmentBase(sb, path) +} + // PersistSegmentBase persists SegmentBase in the zap file format. func PersistSegmentBase(sb *SegmentBase, path string) error { flag := os.O_RDWR | os.O_CREATE diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/contentcoder.go b/vendor/github.com/blevesearch/zap/v11/contentcoder.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/contentcoder.go rename to vendor/github.com/blevesearch/zap/v11/contentcoder.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/count.go b/vendor/github.com/blevesearch/zap/v11/count.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/count.go rename to vendor/github.com/blevesearch/zap/v11/count.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/dict.go b/vendor/github.com/blevesearch/zap/v11/dict.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/dict.go rename to vendor/github.com/blevesearch/zap/v11/dict.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/docvalues.go b/vendor/github.com/blevesearch/zap/v11/docvalues.go similarity index 99% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/docvalues.go rename to vendor/github.com/blevesearch/zap/v11/docvalues.go index a819ca239f..2566dc6d8c 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/docvalues.go +++ b/vendor/github.com/blevesearch/zap/v11/docvalues.go @@ -76,10 +76,6 @@ func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { return rv } -func (di *docValueReader) fieldName() string { - return di.field -} - func (di *docValueReader) curChunkNumber() uint64 { return di.curChunkNum } diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/enumerator.go b/vendor/github.com/blevesearch/zap/v11/enumerator.go similarity index 100% rename from 
vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/enumerator.go rename to vendor/github.com/blevesearch/zap/v11/enumerator.go diff --git a/vendor/github.com/blevesearch/zap/v11/go.mod b/vendor/github.com/blevesearch/zap/v11/go.mod new file mode 100644 index 0000000000..a238188e66 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v11/go.mod @@ -0,0 +1,12 @@ +module github.com/blevesearch/zap/v11 + +go 1.12 + +require ( + github.com/RoaringBitmap/roaring v0.4.21 + github.com/blevesearch/bleve v1.0.7 + github.com/blevesearch/mmap-go v1.0.2 + github.com/couchbase/vellum v1.0.1 + github.com/golang/snappy v0.0.1 + github.com/spf13/cobra v0.0.5 +) diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/intcoder.go b/vendor/github.com/blevesearch/zap/v11/intcoder.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/intcoder.go rename to vendor/github.com/blevesearch/zap/v11/intcoder.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/merge.go b/vendor/github.com/blevesearch/zap/v11/merge.go similarity index 96% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/merge.go rename to vendor/github.com/blevesearch/zap/v11/merge.go index 50bd7207a5..0d3f5458a2 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/merge.go +++ b/vendor/github.com/blevesearch/zap/v11/merge.go @@ -31,32 +31,30 @@ import ( var DefaultFileMergerBufferSize = 1024 * 1024 -// ValidateMerge can be set by applications to perform additional checks -// on a new segment produced by a merge, by default this does nothing. -// Caller should provide EITHER segments or memSegments, but not both. -// This API is experimental and may be removed at any time. -var ValidateMerge = func(segments []*Segment, memSegments []*SegmentBase, drops []*roaring.Bitmap, newSegment *Segment) error { - return nil -} - const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc -// Merge takes a slice of zap segments and bit masks describing which +// Merge takes a slice of segments and bit masks describing which // documents may be dropped, and creates a new segment containing the -// remaining data. This new segment is built at the specified path, -// with the provided chunkFactor. -func Merge(segments []*Segment, drops []*roaring.Bitmap, path string, - chunkFactor uint32, closeCh chan struct{}, s seg.StatsReporter) ( +// remaining data. This new segment is built at the specified path. 
+func (*ZapPlugin) Merge(segments []seg.Segment, drops []*roaring.Bitmap, path string, + closeCh chan struct{}, s seg.StatsReporter) ( [][]uint64, uint64, error) { + segmentBases := make([]*SegmentBase, len(segments)) for segmenti, segment := range segments { - segmentBases[segmenti] = &segment.SegmentBase + switch segmentx := segment.(type) { + case *Segment: + segmentBases[segmenti] = &segmentx.SegmentBase + case *SegmentBase: + segmentBases[segmenti] = segmentx + default: + panic(fmt.Sprintf("oops, unexpected segment type: %T", segment)) + } } - - return MergeSegmentBases(segmentBases, drops, path, chunkFactor, closeCh, s) + return mergeSegmentBases(segmentBases, drops, path, defaultChunkFactor, closeCh, s) } -func MergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string, +func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string, chunkFactor uint32, closeCh chan struct{}, s seg.StatsReporter) ( [][]uint64, uint64, error) { flag := os.O_RDWR | os.O_CREATE diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/new.go b/vendor/github.com/blevesearch/zap/v11/new.go similarity index 98% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/new.go rename to vendor/github.com/blevesearch/zap/v11/new.go index c108ec16dd..6c75f2fd16 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/new.go +++ b/vendor/github.com/blevesearch/zap/v11/new.go @@ -25,6 +25,7 @@ import ( "github.com/blevesearch/bleve/analysis" "github.com/blevesearch/bleve/document" "github.com/blevesearch/bleve/index" + "github.com/blevesearch/bleve/index/scorch/segment" "github.com/couchbase/vellum" "github.com/golang/snappy" ) @@ -41,10 +42,17 @@ var ValidateDocFields = func(field document.Field) error { return nil } +var defaultChunkFactor uint32 = 1024 + // AnalysisResultsToSegmentBase produces an in-memory zap-encoded // SegmentBase from analysis results -func AnalysisResultsToSegmentBase(results []*index.AnalysisResult, - chunkFactor uint32) (*SegmentBase, uint64, error) { +func (z *ZapPlugin) New(results []*index.AnalysisResult) ( + segment.Segment, uint64, error) { + return z.newWithChunkFactor(results, defaultChunkFactor) +} + +func (*ZapPlugin) newWithChunkFactor(results []*index.AnalysisResult, + chunkFactor uint32) (segment.Segment, uint64, error) { s := interimPool.Get().(*interim) var br bytes.Buffer diff --git a/vendor/github.com/blevesearch/zap/v11/plugin.go b/vendor/github.com/blevesearch/zap/v11/plugin.go new file mode 100644 index 0000000000..38a0638d4d --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v11/plugin.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "github.com/blevesearch/bleve/index/scorch/segment" +) + +// ZapPlugin implements the Plugin interface of +// the blevesearch/bleve/index/scorch/segment pkg +type ZapPlugin struct{} + +func (*ZapPlugin) Type() string { + return Type +} + +func (*ZapPlugin) Version() uint32 { + return Version +} + +// Plugin returns an instance segment.Plugin for use +// by the Scorch indexing scheme +func Plugin() segment.Plugin { + return &ZapPlugin{} +} diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/posting.go b/vendor/github.com/blevesearch/zap/v11/posting.go similarity index 97% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/posting.go rename to vendor/github.com/blevesearch/zap/v11/posting.go index 4c43fdb9b9..619dc4c323 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/posting.go +++ b/vendor/github.com/blevesearch/zap/v11/posting.go @@ -793,10 +793,23 @@ func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { return 0, false } +// ActualBitmap returns the underlying actual bitmap +// which can be used up the stack for optimizations +func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { + return p.ActualBM +} + +// ReplaceActual replaces the ActualBM with the provided +// bitmap +func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { + p.ActualBM = abm + p.Actual = abm.Iterator() +} + // PostingsIteratorFromBitmap constructs a PostingsIterator given an // "actual" bitmap. func PostingsIteratorFromBitmap(bm *roaring.Bitmap, - includeFreqNorm, includeLocs bool) (*PostingsIterator, error) { + includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { return &PostingsIterator{ ActualBM: bm, Actual: bm.Iterator(), @@ -807,11 +820,11 @@ func PostingsIteratorFromBitmap(bm *roaring.Bitmap, // PostingsIteratorFrom1Hit constructs a PostingsIterator given a // 1-hit docNum. 
-func PostingsIteratorFrom1Hit(docNum1Hit, normBits1Hit uint64, - includeFreqNorm, includeLocs bool) (*PostingsIterator, error) { +func PostingsIteratorFrom1Hit(docNum1Hit uint64, + includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { return &PostingsIterator{ docNum1Hit: docNum1Hit, - normBits1Hit: normBits1Hit, + normBits1Hit: NormBits1Hit, includeFreqNorm: includeFreqNorm, includeLocs: includeLocs, }, nil diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/read.go b/vendor/github.com/blevesearch/zap/v11/read.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/read.go rename to vendor/github.com/blevesearch/zap/v11/read.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/segment.go b/vendor/github.com/blevesearch/zap/v11/segment.go similarity index 99% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/segment.go rename to vendor/github.com/blevesearch/zap/v11/segment.go index 5aa33a26c9..517b6afbfc 100644 --- a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/segment.go +++ b/vendor/github.com/blevesearch/zap/v11/segment.go @@ -27,7 +27,7 @@ import ( "github.com/blevesearch/bleve/index/scorch/segment" "github.com/blevesearch/bleve/size" "github.com/couchbase/vellum" - mmap "github.com/edsrzf/mmap-go" + mmap "github.com/blevesearch/mmap-go" "github.com/golang/snappy" ) @@ -39,7 +39,7 @@ func init() { } // Open returns a zap impl of a segment -func Open(path string) (segment.Segment, error) { +func (*ZapPlugin) Open(path string) (segment.Segment, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -116,7 +116,7 @@ func (sb *SegmentBase) updateSize() { cap(sb.mem) // fieldsMap - for k, _ := range sb.fieldsMap { + for k := range sb.fieldsMap { sizeInBytes += (len(k) + size.SizeOfString) + size.SizeOfUint16 } diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/write.go b/vendor/github.com/blevesearch/zap/v11/write.go similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/write.go rename to vendor/github.com/blevesearch/zap/v11/write.go diff --git a/vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/zap.md b/vendor/github.com/blevesearch/zap/v11/zap.md similarity index 100% rename from vendor/github.com/blevesearch/bleve/index/scorch/segment/zap/zap.md rename to vendor/github.com/blevesearch/zap/v11/zap.md diff --git a/vendor/github.com/blevesearch/zap/v12/.gitignore b/vendor/github.com/blevesearch/zap/v12/.gitignore new file mode 100644 index 0000000000..46d1cfad54 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/.gitignore @@ -0,0 +1,12 @@ +#* +*.sublime-* +*~ +.#* +.project +.settings +**/.idea/ +**/*.iml +.DS_Store +/cmd/zap/zap +*.test +tags diff --git a/vendor/github.com/blevesearch/zap/v12/LICENSE b/vendor/github.com/blevesearch/zap/v12/LICENSE new file mode 100644 index 0000000000..7a4a3ea242 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/blevesearch/zap/v12/README.md b/vendor/github.com/blevesearch/zap/v12/README.md new file mode 100644 index 0000000000..0facb669fd --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/README.md @@ -0,0 +1,158 @@ +# zap file format + +Advanced ZAP File Format Documentation is [here](zap.md). + +The file is written in the reverse order that we typically access data. This helps us write in one pass since later sections of the file require file offsets of things we've already written. + +Current usage: + +- mmap the entire file +- crc-32 bytes and version are in fixed position at end of the file +- reading remainder of footer could be version specific +- remainder of footer gives us: + - 3 important offsets (docValue , fields index and stored data index) + - 2 important values (number of docs and chunk factor) +- field data is processed once and memoized onto the heap so that we never have to go back to disk for it +- access to stored data by doc number means first navigating to the stored data index, then accessing a fixed position offset into that slice, which gives us the actual address of the data. the first bytes of that section tell us the size of data so that we know where it ends. +- access to all other indexed data follows the following pattern: + - first know the field name -> convert to id + - next navigate to term dictionary for that field + - some operations stop here and do dictionary ops + - next use dictionary to navigate to posting list for a specific term + - walk posting list + - if necessary, walk posting details as we go + - if location info is desired, consult location bitmap to see if it is there + +## stored fields section + +- for each document + - preparation phase: + - produce a slice of metadata bytes and data bytes + - produce these slices in field id order + - field value is appended to the data slice + - metadata slice is varint encoded with the following values for each field value + - field id (uint16) + - field type (byte) + - field value start offset in uncompressed data slice (uint64) + - field value length (uint64) + - field number of array positions (uint64) + - one additional value for each array position (uint64) + - compress the data slice using snappy + - file writing phase: + - remember the start offset for this document + - write out meta data length (varint uint64) + - write out compressed data length (varint uint64) + - write out the metadata bytes + - write out the compressed data bytes + +## stored fields idx + +- for each document + - write start offset (remembered from previous section) of stored data (big endian uint64) + +With this index and a known document number, we have direct access to all the stored field data. 
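To make that lookup concrete, here is a minimal sketch of the reader side, assuming an mmap'd file in `mem` and a `storedIndexOffset` taken from the footer; the package, helper name, and parameters are illustrative, not the vendored API:

```go
package zapsketch

import (
	"encoding/binary"

	"github.com/golang/snappy"
)

// storedFieldData resolves one document's stored-field bytes: a fixed-width
// jump into the stored fields index yields the record address, the record
// starts with two varints (metadata length, compressed data length), and
// the data itself is one snappy block per document.
func storedFieldData(mem []byte, storedIndexOffset, docNum uint64) (meta, data []byte, err error) {
	// the index is a flat array of big endian uint64 start offsets
	slot := storedIndexOffset + 8*docNum
	docStart := binary.BigEndian.Uint64(mem[slot : slot+8])

	// record header: metadata length, then compressed data length
	metaLen, n := binary.Uvarint(mem[docStart:])
	dataLen, m := binary.Uvarint(mem[docStart+uint64(n):])

	metaStart := docStart + uint64(n) + uint64(m)
	meta = mem[metaStart : metaStart+metaLen]

	// field values were compressed as a single snappy block per document
	compressed := mem[metaStart+metaLen : metaStart+metaLen+dataLen]
	data, err = snappy.Decode(nil, compressed)
	return meta, data, err
}
```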
+ +## posting details (freq/norm) section + +- for each posting list + - produce a slice containing multiple consecutive chunks (each chunk is varint stream) + - produce a slice remembering offsets of where each chunk starts + - preparation phase: + - for each hit in the posting list + - if this hit is in next chunk close out encoding of last chunk and record offset start of next + - encode term frequency (uint64) + - encode norm factor (float32) + - file writing phase: + - remember start position for this posting list details + - write out number of chunks that follow (varint uint64) + - write out length of each chunk (each a varint uint64) + - write out the byte slice containing all the chunk data + +If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. + +## posting details (location) section + +- for each posting list + - produce a slice containing multiple consecutive chunks (each chunk is varint stream) + - produce a slice remembering offsets of where each chunk starts + - preparation phase: + - for each hit in the posting list + - if this hit is in next chunk close out encoding of last chunk and record offset start of next + - encode field (uint16) + - encode field pos (uint64) + - encode field start (uint64) + - encode field end (uint64) + - encode number of array positions to follow (uint64) + - encode each array position (each uint64) + - file writing phase: + - remember start position for this posting list details + - write out number of chunks that follow (varint uint64) + - write out length of each chunk (each a varint uint64) + - write out the byte slice containing all the chunk data + +If you know the doc number you're interested in, this format lets you jump to the correct chunk (docNum/chunkFactor) directly and then seek within that chunk until you find it. + +## postings list section + +- for each posting list + - preparation phase: + - encode roaring bitmap posting list to bytes (so we know the length) + - file writing phase: + - remember the start position for this posting list + - write freq/norm details offset (remembered from previous, as varint uint64) + - write location details offset (remembered from previous, as varint uint64) + - write length of encoded roaring bitmap + - write the serialized roaring bitmap data + +## dictionary + +- for each field + - preparation phase: + - encode vellum FST with dictionary data pointing to file offset of posting list (remembered from previous) + - file writing phase: + - remember the start position of this persistDictionary + - write length of vellum data (varint uint64) + - write out vellum data + +## fields section + +- for each field + - file writing phase: + - remember start offset for each field + - write dictionary address (remembered from previous) (varint uint64) + - write length of field name (varint uint64) + - write field name bytes + +## fields idx + +- for each field + - file writing phase: + - write big endian uint64 of start offset for each field + +NOTE: currently we don't know or record the length of this fields index. Instead we rely on the fact that we know it immediately precedes a footer of known size. 
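The chunk jump described in the posting details sections above is small enough to sketch. Assuming the recorded per-chunk lengths have been summed into cumulative end offsets (the shape `modifyLengthsToEndOffsets` in intcoder.go produces), locating a docNum's chunk is one division plus one slice lookup; the helper below is illustrative, mirroring what the vendored `readChunkBoundary` does:

```go
package zapsketch

// chunkBounds returns the byte range of the chunk holding docNum: one
// division (docNum/chunkFactor) jumps to the chunk index, the previous
// chunk's end offset marks its start, and the reader then walks the
// varint stream within [start, end) until it reaches docNum.
func chunkBounds(docNum, chunkFactor uint64, endOffsets []uint64) (start, end uint64) {
	chunk := docNum / chunkFactor
	if chunk > 0 {
		start = endOffsets[chunk-1]
	}
	return start, endOffsets[chunk]
}
```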
+
+## fields DocValue
+
+- for each field
+  - preparation phase:
+    - produce a slice containing multiple consecutive chunks, where each chunk is composed of a meta section followed by compressed columnar field data
+    - produce a slice remembering the length of each chunk
+  - file writing phase:
+    - remember the start position of this first field DocValue offset in the footer
+    - write out number of chunks that follow (varint uint64)
+    - write out length of each chunk (each a varint uint64)
+    - write out the byte slice containing all the chunk data
+
+NOTE: currently the meta header inside each chunk records the location offsets and sizes of the data pertaining to a given docID, and read
+operations leverage that meta information to extract the document-specific data from the file.
+
+## footer
+
+- file writing phase
+  - write number of docs (big endian uint64)
+  - write stored field index location (big endian uint64)
+  - write field index location (big endian uint64)
+  - write field docValue location (big endian uint64)
+  - write out chunk factor (big endian uint32)
+  - write out version (big endian uint32)
+  - write out file CRC of everything preceding this (big endian uint32)
diff --git a/vendor/github.com/blevesearch/zap/v12/build.go b/vendor/github.com/blevesearch/zap/v12/build.go
new file mode 100644
index 0000000000..467e5e007b
--- /dev/null
+++ b/vendor/github.com/blevesearch/zap/v12/build.go
@@ -0,0 +1,156 @@
+// Copyright (c) 2017 Couchbase, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package zap
+
+import (
+	"bufio"
+	"math"
+	"os"
+
+	"github.com/couchbase/vellum"
+)
+
+const Version uint32 = 12
+
+const Type string = "zap"
+
+const fieldNotUninverted = math.MaxUint64
+
+func (sb *SegmentBase) Persist(path string) error {
+	return PersistSegmentBase(sb, path)
+}
+
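The `PersistSegmentBase` function below finishes by calling `persistFooter`, and because the footer (documented in the README above) is fixed-size and sits at the very end of the file, a reader can bootstrap every other section from it. A minimal sketch of the inverse, decoding step follows; the standalone package and all names in it are illustrative, not the vendored API:

```go
package zapsketch

import "encoding/binary"

// footer holds the values the README says are written last, in write order.
type footer struct {
	numDocs           uint64
	storedIndexOffset uint64
	fieldsIndexOffset uint64
	docValueOffset    uint64
	chunkFactor       uint32
	version           uint32
	crc               uint32
}

// parseFooter decodes the 44-byte footer from the end of the mmap'd file:
// numDocs and three section offsets as big-endian uint64s, then the chunk
// factor, version, and CRC as big-endian uint32s at the very end.
func parseFooter(mem []byte) footer {
	end := len(mem)
	return footer{
		crc:               binary.BigEndian.Uint32(mem[end-4 : end]),
		version:           binary.BigEndian.Uint32(mem[end-8 : end-4]),
		chunkFactor:       binary.BigEndian.Uint32(mem[end-12 : end-8]),
		docValueOffset:    binary.BigEndian.Uint64(mem[end-20 : end-12]),
		fieldsIndexOffset: binary.BigEndian.Uint64(mem[end-28 : end-20]),
		storedIndexOffset: binary.BigEndian.Uint64(mem[end-36 : end-28]),
		numDocs:           binary.BigEndian.Uint64(mem[end-44 : end-36]),
	}
}
```

+// PersistSegmentBase persists SegmentBase in the zap file format.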
+func PersistSegmentBase(sb *SegmentBase, path string) error { + flag := os.O_RDWR | os.O_CREATE + + f, err := os.OpenFile(path, flag, 0600) + if err != nil { + return err + } + + cleanup := func() { + _ = f.Close() + _ = os.Remove(path) + } + + br := bufio.NewWriter(f) + + _, err = br.Write(sb.mem) + if err != nil { + cleanup() + return err + } + + err = persistFooter(sb.numDocs, sb.storedIndexOffset, sb.fieldsIndexOffset, sb.docValueOffset, + sb.chunkMode, sb.memCRC, br) + if err != nil { + cleanup() + return err + } + + err = br.Flush() + if err != nil { + cleanup() + return err + } + + err = f.Sync() + if err != nil { + cleanup() + return err + } + + err = f.Close() + if err != nil { + cleanup() + return err + } + + return nil +} + +func persistStoredFieldValues(fieldID int, + storedFieldValues [][]byte, stf []byte, spf [][]uint64, + curr int, metaEncode varintEncoder, data []byte) ( + int, []byte, error) { + for i := 0; i < len(storedFieldValues); i++ { + // encode field + _, err := metaEncode(uint64(fieldID)) + if err != nil { + return 0, nil, err + } + // encode type + _, err = metaEncode(uint64(stf[i])) + if err != nil { + return 0, nil, err + } + // encode start offset + _, err = metaEncode(uint64(curr)) + if err != nil { + return 0, nil, err + } + // end len + _, err = metaEncode(uint64(len(storedFieldValues[i]))) + if err != nil { + return 0, nil, err + } + // encode number of array pos + _, err = metaEncode(uint64(len(spf[i]))) + if err != nil { + return 0, nil, err + } + // encode all array positions + for _, pos := range spf[i] { + _, err = metaEncode(pos) + if err != nil { + return 0, nil, err + } + } + + data = append(data, storedFieldValues[i]...) + curr += len(storedFieldValues[i]) + } + + return curr, data, nil +} + +func InitSegmentBase(mem []byte, memCRC uint32, chunkMode uint32, + fieldsMap map[string]uint16, fieldsInv []string, numDocs uint64, + storedIndexOffset uint64, fieldsIndexOffset uint64, docValueOffset uint64, + dictLocs []uint64) (*SegmentBase, error) { + sb := &SegmentBase{ + mem: mem, + memCRC: memCRC, + chunkMode: chunkMode, + fieldsMap: fieldsMap, + fieldsInv: fieldsInv, + numDocs: numDocs, + storedIndexOffset: storedIndexOffset, + fieldsIndexOffset: fieldsIndexOffset, + docValueOffset: docValueOffset, + dictLocs: dictLocs, + fieldDvReaders: make(map[uint16]*docValueReader), + fieldFSTs: make(map[uint16]*vellum.FST), + } + sb.updateSize() + + err := sb.loadDvReaders() + if err != nil { + return nil, err + } + + return sb, nil +} diff --git a/vendor/github.com/blevesearch/zap/v12/chunk.go b/vendor/github.com/blevesearch/zap/v12/chunk.go new file mode 100644 index 0000000000..fe9f398da6 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/chunk.go @@ -0,0 +1,54 @@ +// Copyright (c) 2019 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "fmt" +) + +// LegacyChunkMode was the original chunk mode (always chunk size 1024) +// this mode is still used for chunking doc values. 
+var LegacyChunkMode uint32 = 1024 + +// DefaultChunkMode is the most recent improvement to chunking and should +// be used by default. +var DefaultChunkMode uint32 = 1025 + +func getChunkSize(chunkMode uint32, cardinality uint64, maxDocs uint64) (uint64, error) { + switch { + // any chunkMode <= 1024 will always chunk with chunkSize=chunkMode + case chunkMode <= 1024: + // legacy chunk size + return uint64(chunkMode), nil + + case chunkMode == 1025: + // attempt at simple improvement + // theory - the point of chunking is to put a bound on the maximum number of + // calls to Next() needed to find a random document. ie, you should be able + // to do one jump to the correct chunk, and then walk through at most + // chunk-size items + // previously 1024 was chosen as the chunk size, but this is particularly + // wasteful for low cardinality terms. the observation is that if there + // are less than 1024 items, why not put them all in one chunk, + // this way you'll still achieve the same goal of visiting at most + // chunk-size items. + // no attempt is made to tweak any other case + if cardinality <= 1024 { + return maxDocs, nil + } + return 1024, nil + } + return 0, fmt.Errorf("unknown chunk mode %d", chunkMode) +} diff --git a/vendor/github.com/blevesearch/zap/v12/contentcoder.go b/vendor/github.com/blevesearch/zap/v12/contentcoder.go new file mode 100644 index 0000000000..c145b5a113 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/contentcoder.go @@ -0,0 +1,243 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "bytes" + "encoding/binary" + "io" + "reflect" + + "github.com/golang/snappy" +) + +var reflectStaticSizeMetaData int + +func init() { + var md MetaData + reflectStaticSizeMetaData = int(reflect.TypeOf(md).Size()) +} + +var termSeparator byte = 0xff +var termSeparatorSplitSlice = []byte{termSeparator} + +type chunkedContentCoder struct { + final []byte + chunkSize uint64 + currChunk uint64 + chunkLens []uint64 + + w io.Writer + progressiveWrite bool + + chunkMetaBuf bytes.Buffer + chunkBuf bytes.Buffer + + chunkMeta []MetaData + + compressed []byte // temp buf for snappy compression +} + +// MetaData represents the data information inside a +// chunk. +type MetaData struct { + DocNum uint64 // docNum of the data inside the chunk + DocDvOffset uint64 // offset of data inside the chunk for the given docid +} + +// newChunkedContentCoder returns a new chunk content coder which +// packs data into chunks based on the provided chunkSize +func newChunkedContentCoder(chunkSize uint64, maxDocNum uint64, + w io.Writer, progressiveWrite bool) *chunkedContentCoder { + total := maxDocNum/chunkSize + 1 + rv := &chunkedContentCoder{ + chunkSize: chunkSize, + chunkLens: make([]uint64, total), + chunkMeta: make([]MetaData, 0, total), + w: w, + progressiveWrite: progressiveWrite, + } + + return rv +} + +// Reset lets you reuse this chunked content coder. Buffers are reset +// and re used. 
You cannot change the chunk size. +func (c *chunkedContentCoder) Reset() { + c.currChunk = 0 + c.final = c.final[:0] + c.chunkBuf.Reset() + c.chunkMetaBuf.Reset() + for i := range c.chunkLens { + c.chunkLens[i] = 0 + } + c.chunkMeta = c.chunkMeta[:0] +} + +func (c *chunkedContentCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { + total := int(maxDocNum/chunkSize + 1) + c.chunkSize = chunkSize + if cap(c.chunkLens) < total { + c.chunkLens = make([]uint64, total) + } else { + c.chunkLens = c.chunkLens[:total] + } + if cap(c.chunkMeta) < total { + c.chunkMeta = make([]MetaData, 0, total) + } +} + +// Close indicates you are done calling Add() this allows +// the final chunk to be encoded. +func (c *chunkedContentCoder) Close() error { + return c.flushContents() +} + +func (c *chunkedContentCoder) flushContents() error { + // flush the contents, with meta information at first + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(buf, uint64(len(c.chunkMeta))) + _, err := c.chunkMetaBuf.Write(buf[:n]) + if err != nil { + return err + } + + // write out the metaData slice + for _, meta := range c.chunkMeta { + _, err := writeUvarints(&c.chunkMetaBuf, meta.DocNum, meta.DocDvOffset) + if err != nil { + return err + } + } + + // write the metadata to final data + metaData := c.chunkMetaBuf.Bytes() + c.final = append(c.final, c.chunkMetaBuf.Bytes()...) + // write the compressed data to the final data + c.compressed = snappy.Encode(c.compressed[:cap(c.compressed)], c.chunkBuf.Bytes()) + c.final = append(c.final, c.compressed...) + + c.chunkLens[c.currChunk] = uint64(len(c.compressed) + len(metaData)) + + if c.progressiveWrite { + _, err := c.w.Write(c.final) + if err != nil { + return err + } + c.final = c.final[:0] + } + + return nil +} + +// Add encodes the provided byte slice into the correct chunk for the provided +// doc num. You MUST call Add() with increasing docNums. +func (c *chunkedContentCoder) Add(docNum uint64, vals []byte) error { + chunk := docNum / c.chunkSize + if chunk != c.currChunk { + // flush out the previous chunk details + err := c.flushContents() + if err != nil { + return err + } + // clearing the chunk specific meta for next chunk + c.chunkBuf.Reset() + c.chunkMetaBuf.Reset() + c.chunkMeta = c.chunkMeta[:0] + c.currChunk = chunk + } + + // get the starting offset for this doc + dvOffset := c.chunkBuf.Len() + dvSize, err := c.chunkBuf.Write(vals) + if err != nil { + return err + } + + c.chunkMeta = append(c.chunkMeta, MetaData{ + DocNum: docNum, + DocDvOffset: uint64(dvOffset + dvSize), + }) + return nil +} + +// Write commits all the encoded chunked contents to the provided writer. +// +// | ..... data ..... 
| chunk offsets (varints) +// | position of chunk offsets (uint64) | number of offsets (uint64) | +// +func (c *chunkedContentCoder) Write() (int, error) { + var tw int + + if c.final != nil { + // write out the data section first + nw, err := c.w.Write(c.final) + tw += nw + if err != nil { + return tw, err + } + } + + chunkOffsetsStart := uint64(tw) + + if cap(c.final) < binary.MaxVarintLen64 { + c.final = make([]byte, binary.MaxVarintLen64) + } else { + c.final = c.final[0:binary.MaxVarintLen64] + } + chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) + // write out the chunk offsets + for _, chunkOffset := range chunkOffsets { + n := binary.PutUvarint(c.final, chunkOffset) + nw, err := c.w.Write(c.final[:n]) + tw += nw + if err != nil { + return tw, err + } + } + + chunkOffsetsLen := uint64(tw) - chunkOffsetsStart + + c.final = c.final[0:8] + // write out the length of chunk offsets + binary.BigEndian.PutUint64(c.final, chunkOffsetsLen) + nw, err := c.w.Write(c.final) + tw += nw + if err != nil { + return tw, err + } + + // write out the number of chunks + binary.BigEndian.PutUint64(c.final, uint64(len(c.chunkLens))) + nw, err = c.w.Write(c.final) + tw += nw + if err != nil { + return tw, err + } + + c.final = c.final[:0] + + return tw, nil +} + +// ReadDocValueBoundary elicits the start, end offsets from a +// metaData header slice +func ReadDocValueBoundary(chunk int, metaHeaders []MetaData) (uint64, uint64) { + var start uint64 + if chunk > 0 { + start = metaHeaders[chunk-1].DocDvOffset + } + return start, metaHeaders[chunk].DocDvOffset +} diff --git a/vendor/github.com/blevesearch/zap/v12/count.go b/vendor/github.com/blevesearch/zap/v12/count.go new file mode 100644 index 0000000000..50290f8882 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/count.go @@ -0,0 +1,61 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "hash/crc32" + "io" + + "github.com/blevesearch/bleve/index/scorch/segment" +) + +// CountHashWriter is a wrapper around a Writer which counts the number of +// bytes which have been written and computes a crc32 hash +type CountHashWriter struct { + w io.Writer + crc uint32 + n int + s segment.StatsReporter +} + +// NewCountHashWriter returns a CountHashWriter which wraps the provided Writer +func NewCountHashWriter(w io.Writer) *CountHashWriter { + return &CountHashWriter{w: w} +} + +func NewCountHashWriterWithStatsReporter(w io.Writer, s segment.StatsReporter) *CountHashWriter { + return &CountHashWriter{w: w, s: s} +} + +// Write writes the provided bytes to the wrapped writer and counts the bytes +func (c *CountHashWriter) Write(b []byte) (int, error) { + n, err := c.w.Write(b) + c.crc = crc32.Update(c.crc, crc32.IEEETable, b[:n]) + c.n += n + if c.s != nil { + c.s.ReportBytesWritten(uint64(n)) + } + return n, err +} + +// Count returns the number of bytes written +func (c *CountHashWriter) Count() int { + return c.n +} + +// Sum32 returns the CRC-32 hash of the content written to this writer +func (c *CountHashWriter) Sum32() uint32 { + return c.crc +} diff --git a/vendor/github.com/blevesearch/zap/v12/dict.go b/vendor/github.com/blevesearch/zap/v12/dict.go new file mode 100644 index 0000000000..ad4a8f8dc5 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/dict.go @@ -0,0 +1,263 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "bytes" + "fmt" + + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/bleve/index" + "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/couchbase/vellum" +) + +// Dictionary is the zap representation of the term dictionary +type Dictionary struct { + sb *SegmentBase + field string + fieldID uint16 + fst *vellum.FST + fstReader *vellum.Reader +} + +// PostingsList returns the postings list for the specified term +func (d *Dictionary) PostingsList(term []byte, except *roaring.Bitmap, + prealloc segment.PostingsList) (segment.PostingsList, error) { + var preallocPL *PostingsList + pl, ok := prealloc.(*PostingsList) + if ok && pl != nil { + preallocPL = pl + } + return d.postingsList(term, except, preallocPL) +} + +func (d *Dictionary) postingsList(term []byte, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { + if d.fstReader == nil { + if rv == nil || rv == emptyPostingsList { + return emptyPostingsList, nil + } + return d.postingsListInit(rv, except), nil + } + + postingsOffset, exists, err := d.fstReader.Get(term) + if err != nil { + return nil, fmt.Errorf("vellum err: %v", err) + } + if !exists { + if rv == nil || rv == emptyPostingsList { + return emptyPostingsList, nil + } + return d.postingsListInit(rv, except), nil + } + + return d.postingsListFromOffset(postingsOffset, except, rv) +} + +func (d *Dictionary) postingsListFromOffset(postingsOffset uint64, except *roaring.Bitmap, rv *PostingsList) (*PostingsList, error) { + rv = d.postingsListInit(rv, except) + + err := rv.read(postingsOffset, d) + if err != nil { + return nil, err + } + + return rv, nil +} + +func (d *Dictionary) postingsListInit(rv *PostingsList, except *roaring.Bitmap) *PostingsList { + if rv == nil || rv == emptyPostingsList { + rv = &PostingsList{} + } else { + postings := rv.postings + if postings != nil { + postings.Clear() + } + + *rv = PostingsList{} // clear the struct + + rv.postings = postings + } + rv.sb = d.sb + rv.except = except + return rv +} + +func (d *Dictionary) Contains(key []byte) (bool, error) { + return d.fst.Contains(key) +} + +// Iterator returns an iterator for this dictionary +func (d *Dictionary) Iterator() segment.DictionaryIterator { + rv := &DictionaryIterator{ + d: d, + } + + if d.fst != nil { + itr, err := d.fst.Iterator(nil, nil) + if err == nil { + rv.itr = itr + } else if err != vellum.ErrIteratorDone { + rv.err = err + } + } + + return rv +} + +// PrefixIterator returns an iterator which only visits terms having the +// the specified prefix +func (d *Dictionary) PrefixIterator(prefix string) segment.DictionaryIterator { + rv := &DictionaryIterator{ + d: d, + } + + kBeg := []byte(prefix) + kEnd := segment.IncrementBytes(kBeg) + + if d.fst != nil { + itr, err := d.fst.Iterator(kBeg, kEnd) + if err == nil { + rv.itr = itr + } else if err != vellum.ErrIteratorDone { + rv.err = err + } + } + + return rv +} + +// RangeIterator returns an iterator which only visits terms between the +// start and end terms. NOTE: bleve.index API specifies the end is inclusive. 
+func (d *Dictionary) RangeIterator(start, end string) segment.DictionaryIterator { + rv := &DictionaryIterator{ + d: d, + } + + // need to increment the end position to be inclusive + var endBytes []byte + if len(end) > 0 { + endBytes = []byte(end) + if endBytes[len(endBytes)-1] < 0xff { + endBytes[len(endBytes)-1]++ + } else { + endBytes = append(endBytes, 0xff) + } + } + + if d.fst != nil { + itr, err := d.fst.Iterator([]byte(start), endBytes) + if err == nil { + rv.itr = itr + } else if err != vellum.ErrIteratorDone { + rv.err = err + } + } + + return rv +} + +// AutomatonIterator returns an iterator which only visits terms +// having the the vellum automaton and start/end key range +func (d *Dictionary) AutomatonIterator(a vellum.Automaton, + startKeyInclusive, endKeyExclusive []byte) segment.DictionaryIterator { + rv := &DictionaryIterator{ + d: d, + } + + if d.fst != nil { + itr, err := d.fst.Search(a, startKeyInclusive, endKeyExclusive) + if err == nil { + rv.itr = itr + } else if err != vellum.ErrIteratorDone { + rv.err = err + } + } + + return rv +} + +func (d *Dictionary) OnlyIterator(onlyTerms [][]byte, + includeCount bool) segment.DictionaryIterator { + + rv := &DictionaryIterator{ + d: d, + omitCount: !includeCount, + } + + var buf bytes.Buffer + builder, err := vellum.New(&buf, nil) + if err != nil { + rv.err = err + return rv + } + for _, term := range onlyTerms { + err = builder.Insert(term, 0) + if err != nil { + rv.err = err + return rv + } + } + err = builder.Close() + if err != nil { + rv.err = err + return rv + } + + onlyFST, err := vellum.Load(buf.Bytes()) + if err != nil { + rv.err = err + return rv + } + + itr, err := d.fst.Search(onlyFST, nil, nil) + if err == nil { + rv.itr = itr + } else if err != vellum.ErrIteratorDone { + rv.err = err + } + + return rv +} + +// DictionaryIterator is an iterator for term dictionary +type DictionaryIterator struct { + d *Dictionary + itr vellum.Iterator + err error + tmp PostingsList + entry index.DictEntry + omitCount bool +} + +// Next returns the next entry in the dictionary +func (i *DictionaryIterator) Next() (*index.DictEntry, error) { + if i.err != nil && i.err != vellum.ErrIteratorDone { + return nil, i.err + } else if i.itr == nil || i.err == vellum.ErrIteratorDone { + return nil, nil + } + term, postingsOffset := i.itr.Current() + i.entry.Term = string(term) + if !i.omitCount { + i.err = i.tmp.read(postingsOffset, i.d) + if i.err != nil { + return nil, i.err + } + i.entry.Count = i.tmp.Count() + } + i.err = i.itr.Next() + return &i.entry, nil +} diff --git a/vendor/github.com/blevesearch/zap/v12/docvalues.go b/vendor/github.com/blevesearch/zap/v12/docvalues.go new file mode 100644 index 0000000000..793797bd8a --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/docvalues.go @@ -0,0 +1,312 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + + "github.com/blevesearch/bleve/index" + "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/blevesearch/bleve/size" + "github.com/golang/snappy" +) + +var reflectStaticSizedocValueReader int + +func init() { + var dvi docValueReader + reflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size()) +} + +type docNumTermsVisitor func(docNum uint64, terms []byte) error + +type docVisitState struct { + dvrs map[uint16]*docValueReader + segment *SegmentBase +} + +type docValueReader struct { + field string + curChunkNum uint64 + chunkOffsets []uint64 + dvDataLoc uint64 + curChunkHeader []MetaData + curChunkData []byte // compressed data cache + uncompressed []byte // temp buf for snappy decompression +} + +func (di *docValueReader) size() int { + return reflectStaticSizedocValueReader + size.SizeOfPtr + + len(di.field) + + len(di.chunkOffsets)*size.SizeOfUint64 + + len(di.curChunkHeader)*reflectStaticSizeMetaData + + len(di.curChunkData) +} + +func (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader { + if rv == nil { + rv = &docValueReader{} + } + + rv.field = di.field + rv.curChunkNum = math.MaxUint64 + rv.chunkOffsets = di.chunkOffsets // immutable, so it's sharable + rv.dvDataLoc = di.dvDataLoc + rv.curChunkHeader = rv.curChunkHeader[:0] + rv.curChunkData = nil + rv.uncompressed = rv.uncompressed[:0] + + return rv +} + +func (di *docValueReader) curChunkNumber() uint64 { + return di.curChunkNum +} + +func (s *SegmentBase) loadFieldDocValueReader(field string, + fieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) { + // get the docValue offset for the given fields + if fieldDvLocStart == fieldNotUninverted { + // no docValues found, nothing to do + return nil, nil + } + + // read the number of chunks, and chunk offsets position + var numChunks, chunkOffsetsPosition uint64 + + if fieldDvLocEnd-fieldDvLocStart > 16 { + numChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd]) + // read the length of chunk offsets + chunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8]) + // acquire position of chunk offsets + chunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen + } else { + return nil, fmt.Errorf("loadFieldDocValueReader: fieldDvLoc too small: %d-%d", fieldDvLocEnd, fieldDvLocStart) + } + + fdvIter := &docValueReader{ + curChunkNum: math.MaxUint64, + field: field, + chunkOffsets: make([]uint64, int(numChunks)), + } + + // read the chunk offsets + var offset uint64 + for i := 0; i < int(numChunks); i++ { + loc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64]) + if read <= 0 { + return nil, fmt.Errorf("corrupted chunk offset during segment load") + } + fdvIter.chunkOffsets[i] = loc + offset += uint64(read) + } + + // set the data offset + fdvIter.dvDataLoc = fieldDvLocStart + + return fdvIter, nil +} + +func (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error { + // advance to the chunk where the docValues + // reside for the given docNum + destChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc + start, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets) + if start >= end { + di.curChunkHeader = di.curChunkHeader[:0] + di.curChunkData = nil + di.curChunkNum = chunkNumber + di.uncompressed = di.uncompressed[:0] + return nil + } + + destChunkDataLoc += start + curChunkEnd += end + + // read the number of 
docs reside in the chunk + numDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64]) + if read <= 0 { + return fmt.Errorf("failed to read the chunk") + } + chunkMetaLoc := destChunkDataLoc + uint64(read) + + offset := uint64(0) + if cap(di.curChunkHeader) < int(numDocs) { + di.curChunkHeader = make([]MetaData, int(numDocs)) + } else { + di.curChunkHeader = di.curChunkHeader[:int(numDocs)] + } + for i := 0; i < int(numDocs); i++ { + di.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) + offset += uint64(read) + di.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64]) + offset += uint64(read) + } + + compressedDataLoc := chunkMetaLoc + offset + dataLength := curChunkEnd - compressedDataLoc + di.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength] + di.curChunkNum = chunkNumber + di.uncompressed = di.uncompressed[:0] + return nil +} + +func (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error { + for i := 0; i < len(di.chunkOffsets); i++ { + err := di.loadDvChunk(uint64(i), s) + if err != nil { + return err + } + if di.curChunkData == nil || len(di.curChunkHeader) == 0 { + continue + } + + // uncompress the already loaded data + uncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) + if err != nil { + return err + } + di.uncompressed = uncompressed + + start := uint64(0) + for _, entry := range di.curChunkHeader { + err = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset]) + if err != nil { + return err + } + + start = entry.DocDvOffset + } + } + + return nil +} + +func (di *docValueReader) visitDocValues(docNum uint64, + visitor index.DocumentFieldTermVisitor) error { + // binary search the term locations for the docNum + start, end := di.getDocValueLocs(docNum) + if start == math.MaxUint64 || end == math.MaxUint64 || start == end { + return nil + } + + var uncompressed []byte + var err error + // use the uncompressed copy if available + if len(di.uncompressed) > 0 { + uncompressed = di.uncompressed + } else { + // uncompress the already loaded data + uncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData) + if err != nil { + return err + } + di.uncompressed = uncompressed + } + + // pick the terms for the given docNum + uncompressed = uncompressed[start:end] + for { + i := bytes.Index(uncompressed, termSeparatorSplitSlice) + if i < 0 { + break + } + + visitor(di.field, uncompressed[0:i]) + uncompressed = uncompressed[i+1:] + } + + return nil +} + +func (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) { + i := sort.Search(len(di.curChunkHeader), func(i int) bool { + return di.curChunkHeader[i].DocNum >= docNum + }) + if i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum { + return ReadDocValueBoundary(i, di.curChunkHeader) + } + return math.MaxUint64, math.MaxUint64 +} + +// VisitDocumentFieldTerms is an implementation of the +// DocumentFieldTermVisitable interface +func (s *SegmentBase) VisitDocumentFieldTerms(localDocNum uint64, fields []string, + visitor index.DocumentFieldTermVisitor, dvsIn segment.DocVisitState) ( + segment.DocVisitState, error) { + dvs, ok := dvsIn.(*docVisitState) + if !ok || dvs == nil { + dvs = &docVisitState{} + } else { + if dvs.segment != s { + dvs.segment = s + dvs.dvrs = nil + } + } + + var fieldIDPlus1 uint16 
+ if dvs.dvrs == nil { + dvs.dvrs = make(map[uint16]*docValueReader, len(fields)) + for _, field := range fields { + if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { + continue + } + fieldID := fieldIDPlus1 - 1 + if dvIter, exists := s.fieldDvReaders[fieldID]; exists && + dvIter != nil { + dvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID]) + } + } + } + + // find the chunkNumber where the docValues are stored + // NOTE: doc values continue to use legacy chunk mode + chunkFactor, err := getChunkSize(LegacyChunkMode, 0, 0) + if err != nil { + return nil, err + } + docInChunk := localDocNum / chunkFactor + var dvr *docValueReader + for _, field := range fields { + if fieldIDPlus1, ok = s.fieldsMap[field]; !ok { + continue + } + fieldID := fieldIDPlus1 - 1 + if dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil { + // check if the chunk is already loaded + if docInChunk != dvr.curChunkNumber() { + err := dvr.loadDvChunk(docInChunk, s) + if err != nil { + return dvs, err + } + } + + _ = dvr.visitDocValues(localDocNum, visitor) + } + } + return dvs, nil +} + +// VisitableDocValueFields returns the list of fields with +// persisted doc value terms ready to be visitable using the +// VisitDocumentFieldTerms method. +func (s *SegmentBase) VisitableDocValueFields() ([]string, error) { + return s.fieldDvNames, nil +} diff --git a/vendor/github.com/blevesearch/zap/v12/enumerator.go b/vendor/github.com/blevesearch/zap/v12/enumerator.go new file mode 100644 index 0000000000..bc5b7e6276 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/enumerator.go @@ -0,0 +1,138 @@ +// Copyright (c) 2018 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "bytes" + + "github.com/couchbase/vellum" +) + +// enumerator provides an ordered traversal of multiple vellum +// iterators. Like JOIN of iterators, the enumerator produces a +// sequence of (key, iteratorIndex, value) tuples, sorted by key ASC, +// then iteratorIndex ASC, where the same key might be seen or +// repeated across multiple child iterators. 
+type enumerator struct { + itrs []vellum.Iterator + currKs [][]byte + currVs []uint64 + + lowK []byte + lowIdxs []int + lowCurr int +} + +// newEnumerator returns a new enumerator over the vellum Iterators +func newEnumerator(itrs []vellum.Iterator) (*enumerator, error) { + rv := &enumerator{ + itrs: itrs, + currKs: make([][]byte, len(itrs)), + currVs: make([]uint64, len(itrs)), + lowIdxs: make([]int, 0, len(itrs)), + } + for i, itr := range rv.itrs { + rv.currKs[i], rv.currVs[i] = itr.Current() + } + rv.updateMatches(false) + if rv.lowK == nil && len(rv.lowIdxs) == 0 { + return rv, vellum.ErrIteratorDone + } + return rv, nil +} + +// updateMatches maintains the low key matches based on the currKs +func (m *enumerator) updateMatches(skipEmptyKey bool) { + m.lowK = nil + m.lowIdxs = m.lowIdxs[:0] + m.lowCurr = 0 + + for i, key := range m.currKs { + if (key == nil && m.currVs[i] == 0) || // in case of empty iterator + (len(key) == 0 && skipEmptyKey) { // skip empty keys + continue + } + + cmp := bytes.Compare(key, m.lowK) + if cmp < 0 || len(m.lowIdxs) == 0 { + // reached a new low + m.lowK = key + m.lowIdxs = m.lowIdxs[:0] + m.lowIdxs = append(m.lowIdxs, i) + } else if cmp == 0 { + m.lowIdxs = append(m.lowIdxs, i) + } + } +} + +// Current returns the enumerator's current key, iterator-index, and +// value. If the enumerator is not pointing at a valid value (because +// Next returned an error previously), Current will return nil,0,0. +func (m *enumerator) Current() ([]byte, int, uint64) { + var i int + var v uint64 + if m.lowCurr < len(m.lowIdxs) { + i = m.lowIdxs[m.lowCurr] + v = m.currVs[i] + } + return m.lowK, i, v +} + +// GetLowIdxsAndValues will return all of the iterator indices +// which point to the current key, and their corresponding +// values. This can be used by advanced caller which may need +// to peek into these other sets of data before processing. +func (m *enumerator) GetLowIdxsAndValues() ([]int, []uint64) { + values := make([]uint64, 0, len(m.lowIdxs)) + for _, idx := range m.lowIdxs { + values = append(values, m.currVs[idx]) + } + return m.lowIdxs, values +} + +// Next advances the enumerator to the next key/iterator/value result, +// else vellum.ErrIteratorDone is returned. +func (m *enumerator) Next() error { + m.lowCurr += 1 + if m.lowCurr >= len(m.lowIdxs) { + // move all the current low iterators forwards + for _, vi := range m.lowIdxs { + err := m.itrs[vi].Next() + if err != nil && err != vellum.ErrIteratorDone { + return err + } + m.currKs[vi], m.currVs[vi] = m.itrs[vi].Current() + } + // can skip any empty keys encountered at this point + m.updateMatches(true) + } + if m.lowK == nil && len(m.lowIdxs) == 0 { + return vellum.ErrIteratorDone + } + return nil +} + +// Close all the underlying Iterators. The first error, if any, will +// be returned. 
+func (m *enumerator) Close() error { + var rv error + for _, itr := range m.itrs { + err := itr.Close() + if rv == nil { + rv = err + } + } + return rv +} diff --git a/vendor/github.com/blevesearch/zap/v12/go.mod b/vendor/github.com/blevesearch/zap/v12/go.mod new file mode 100644 index 0000000000..24bbcb2c16 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/go.mod @@ -0,0 +1,12 @@ +module github.com/blevesearch/zap/v12 + +go 1.12 + +require ( + github.com/RoaringBitmap/roaring v0.4.21 + github.com/blevesearch/bleve v1.0.7 + github.com/blevesearch/mmap-go v1.0.2 + github.com/couchbase/vellum v1.0.1 + github.com/golang/snappy v0.0.1 + github.com/spf13/cobra v0.0.5 +) diff --git a/vendor/github.com/blevesearch/zap/v12/intDecoder.go b/vendor/github.com/blevesearch/zap/v12/intDecoder.go new file mode 100644 index 0000000000..4cd008ff26 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/intDecoder.go @@ -0,0 +1,111 @@ +// Copyright (c) 2019 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "encoding/binary" + "fmt" + + "github.com/blevesearch/bleve/index/scorch/segment" +) + +type chunkedIntDecoder struct { + startOffset uint64 + dataStartOffset uint64 + chunkOffsets []uint64 + curChunkBytes []byte + data []byte + r *segment.MemUvarintReader +} + +func newChunkedIntDecoder(buf []byte, offset uint64) *chunkedIntDecoder { + rv := &chunkedIntDecoder{startOffset: offset, data: buf} + var n, numChunks uint64 + var read int + if offset == termNotEncoded { + numChunks = 0 + } else { + numChunks, read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) + } + + n += uint64(read) + if cap(rv.chunkOffsets) >= int(numChunks) { + rv.chunkOffsets = rv.chunkOffsets[:int(numChunks)] + } else { + rv.chunkOffsets = make([]uint64, int(numChunks)) + } + for i := 0; i < int(numChunks); i++ { + rv.chunkOffsets[i], read = binary.Uvarint(buf[offset+n : offset+n+binary.MaxVarintLen64]) + n += uint64(read) + } + rv.dataStartOffset = offset + n + return rv +} + +func (d *chunkedIntDecoder) loadChunk(chunk int) error { + if d.startOffset == termNotEncoded { + d.r = segment.NewMemUvarintReader([]byte(nil)) + return nil + } + + if chunk >= len(d.chunkOffsets) { + return fmt.Errorf("tried to load freq chunk that doesn't exist %d/(%d)", + chunk, len(d.chunkOffsets)) + } + + end, start := d.dataStartOffset, d.dataStartOffset + s, e := readChunkBoundary(chunk, d.chunkOffsets) + start += s + end += e + d.curChunkBytes = d.data[start:end] + if d.r == nil { + d.r = segment.NewMemUvarintReader(d.curChunkBytes) + } else { + d.r.Reset(d.curChunkBytes) + } + + return nil +} + +func (d *chunkedIntDecoder) reset() { + d.startOffset = 0 + d.dataStartOffset = 0 + d.chunkOffsets = d.chunkOffsets[:0] + d.curChunkBytes = d.curChunkBytes[:0] + d.data = d.data[:0] + if d.r != nil { + d.r.Reset([]byte(nil)) + } +} + +func (d *chunkedIntDecoder) isNil() bool { + return d.curChunkBytes == nil +} + +func (d *chunkedIntDecoder) readUvarint() (uint64, error) { + return 
d.r.ReadUvarint() +} + +func (d *chunkedIntDecoder) SkipUvarint() { + d.r.SkipUvarint() +} + +func (d *chunkedIntDecoder) SkipBytes(count int) { + d.r.SkipBytes(count) +} + +func (d *chunkedIntDecoder) Len() int { + return d.r.Len() +} diff --git a/vendor/github.com/blevesearch/zap/v12/intcoder.go b/vendor/github.com/blevesearch/zap/v12/intcoder.go new file mode 100644 index 0000000000..7682593e9f --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/intcoder.go @@ -0,0 +1,203 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "bytes" + "encoding/binary" + "io" + "math" +) + +const termNotEncoded = math.MaxUint64 + +type chunkedIntCoder struct { + final []byte + chunkSize uint64 + chunkBuf bytes.Buffer + chunkLens []uint64 + currChunk uint64 + + buf []byte +} + +// newChunkedIntCoder returns a new chunk int coder which packs data into +// chunks based on the provided chunkSize and supports up to the specified +// maxDocNum +func newChunkedIntCoder(chunkSize uint64, maxDocNum uint64) *chunkedIntCoder { + total := maxDocNum/chunkSize + 1 + rv := &chunkedIntCoder{ + chunkSize: chunkSize, + chunkLens: make([]uint64, total), + final: make([]byte, 0, 64), + } + + return rv +} + +// Reset lets you reuse this chunked int coder. buffers are reset and reused +// from previous use. you cannot change the chunk size or max doc num. +func (c *chunkedIntCoder) Reset() { + c.final = c.final[:0] + c.chunkBuf.Reset() + c.currChunk = 0 + for i := range c.chunkLens { + c.chunkLens[i] = 0 + } +} + +// SetChunkSize changes the chunk size. It is only valid to do so +// with a new chunkedIntCoder, or immediately after calling Reset() +func (c *chunkedIntCoder) SetChunkSize(chunkSize uint64, maxDocNum uint64) { + total := int(maxDocNum/chunkSize + 1) + c.chunkSize = chunkSize + if cap(c.chunkLens) < total { + c.chunkLens = make([]uint64, total) + } else { + c.chunkLens = c.chunkLens[:total] + } +} + +// Add encodes the provided integers into the correct chunk for the provided +// doc num. You MUST call Add() with increasing docNums. +func (c *chunkedIntCoder) Add(docNum uint64, vals ...uint64) error { + chunk := docNum / c.chunkSize + if chunk != c.currChunk { + // starting a new chunk + c.Close() + c.chunkBuf.Reset() + c.currChunk = chunk + } + + if len(c.buf) < binary.MaxVarintLen64 { + c.buf = make([]byte, binary.MaxVarintLen64) + } + + for _, val := range vals { + wb := binary.PutUvarint(c.buf, val) + _, err := c.chunkBuf.Write(c.buf[:wb]) + if err != nil { + return err + } + } + + return nil +} + +func (c *chunkedIntCoder) AddBytes(docNum uint64, buf []byte) error { + chunk := docNum / c.chunkSize + if chunk != c.currChunk { + // starting a new chunk + c.Close() + c.chunkBuf.Reset() + c.currChunk = chunk + } + + _, err := c.chunkBuf.Write(buf) + return err +} + +// Close indicates you are done calling Add() this allows the final chunk +// to be encoded. 
+func (c *chunkedIntCoder) Close() { + encodingBytes := c.chunkBuf.Bytes() + c.chunkLens[c.currChunk] = uint64(len(encodingBytes)) + c.final = append(c.final, encodingBytes...) + c.currChunk = uint64(cap(c.chunkLens)) // sentinel to detect double close +} + +// Write commits all the encoded chunked integers to the provided writer. +func (c *chunkedIntCoder) Write(w io.Writer) (int, error) { + bufNeeded := binary.MaxVarintLen64 * (1 + len(c.chunkLens)) + if len(c.buf) < bufNeeded { + c.buf = make([]byte, bufNeeded) + } + buf := c.buf + + // convert the chunk lengths into chunk offsets + chunkOffsets := modifyLengthsToEndOffsets(c.chunkLens) + + // write out the number of chunks & each chunk offsets + n := binary.PutUvarint(buf, uint64(len(chunkOffsets))) + for _, chunkOffset := range chunkOffsets { + n += binary.PutUvarint(buf[n:], chunkOffset) + } + + tw, err := w.Write(buf[:n]) + if err != nil { + return tw, err + } + + // write out the data + nw, err := w.Write(c.final) + tw += nw + if err != nil { + return tw, err + } + return tw, nil +} + +// writeAt commits all the encoded chunked integers to the provided writer +// and returns the starting offset, total bytes written and an error +func (c *chunkedIntCoder) writeAt(w io.Writer) (uint64, int, error) { + startOffset := uint64(termNotEncoded) + if len(c.final) <= 0 { + return startOffset, 0, nil + } + + if chw := w.(*CountHashWriter); chw != nil { + startOffset = uint64(chw.Count()) + } + + tw, err := c.Write(w) + return startOffset, tw, err +} + +func (c *chunkedIntCoder) FinalSize() int { + return len(c.final) +} + +// modifyLengthsToEndOffsets converts the chunk length array +// to a chunk offset array. The readChunkBoundary +// will figure out the start and end of every chunk from +// these offsets. Starting offset of i'th index is stored +// in i-1'th position except for 0'th index and ending offset +// is stored at i'th index position. +// For 0'th element, starting position is always zero. +// eg: +// Lens -> 5 5 5 5 => 5 10 15 20 +// Lens -> 0 5 0 5 => 0 5 5 10 +// Lens -> 0 0 0 5 => 0 0 0 5 +// Lens -> 5 0 0 0 => 5 5 5 5 +// Lens -> 0 5 0 0 => 0 5 5 5 +// Lens -> 0 0 5 0 => 0 0 5 5 +func modifyLengthsToEndOffsets(lengths []uint64) []uint64 { + var runningOffset uint64 + var index, i int + for i = 1; i <= len(lengths); i++ { + runningOffset += lengths[i-1] + lengths[index] = runningOffset + index++ + } + return lengths +} + +func readChunkBoundary(chunk int, offsets []uint64) (uint64, uint64) { + var start uint64 + if chunk > 0 { + start = offsets[chunk-1] + } + return start, offsets[chunk] +} diff --git a/vendor/github.com/blevesearch/zap/v12/merge.go b/vendor/github.com/blevesearch/zap/v12/merge.go new file mode 100644 index 0000000000..805100fb5f --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/merge.go @@ -0,0 +1,847 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "math" + "os" + "sort" + + "github.com/RoaringBitmap/roaring" + seg "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/couchbase/vellum" + "github.com/golang/snappy" +) + +var DefaultFileMergerBufferSize = 1024 * 1024 + +const docDropped = math.MaxUint64 // sentinel docNum to represent a deleted doc + +// Merge takes a slice of segments and bit masks describing which +// documents may be dropped, and creates a new segment containing the +// remaining data. This new segment is built at the specified path. +func (*ZapPlugin) Merge(segments []seg.Segment, drops []*roaring.Bitmap, path string, + closeCh chan struct{}, s seg.StatsReporter) ( + [][]uint64, uint64, error) { + + segmentBases := make([]*SegmentBase, len(segments)) + for segmenti, segment := range segments { + switch segmentx := segment.(type) { + case *Segment: + segmentBases[segmenti] = &segmentx.SegmentBase + case *SegmentBase: + segmentBases[segmenti] = segmentx + default: + panic(fmt.Sprintf("oops, unexpected segment type: %T", segment)) + } + } + return mergeSegmentBases(segmentBases, drops, path, DefaultChunkMode, closeCh, s) +} + +func mergeSegmentBases(segmentBases []*SegmentBase, drops []*roaring.Bitmap, path string, + chunkMode uint32, closeCh chan struct{}, s seg.StatsReporter) ( + [][]uint64, uint64, error) { + flag := os.O_RDWR | os.O_CREATE + + f, err := os.OpenFile(path, flag, 0600) + if err != nil { + return nil, 0, err + } + + cleanup := func() { + _ = f.Close() + _ = os.Remove(path) + } + + // buffer the output + br := bufio.NewWriterSize(f, DefaultFileMergerBufferSize) + + // wrap it for counting (tracking offsets) + cr := NewCountHashWriterWithStatsReporter(br, s) + + newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, _, _, _, err := + MergeToWriter(segmentBases, drops, chunkMode, cr, closeCh) + if err != nil { + cleanup() + return nil, 0, err + } + + err = persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, + docValueOffset, chunkMode, cr.Sum32(), cr) + if err != nil { + cleanup() + return nil, 0, err + } + + err = br.Flush() + if err != nil { + cleanup() + return nil, 0, err + } + + err = f.Sync() + if err != nil { + cleanup() + return nil, 0, err + } + + err = f.Close() + if err != nil { + cleanup() + return nil, 0, err + } + + return newDocNums, uint64(cr.Count()), nil +} + +func MergeToWriter(segments []*SegmentBase, drops []*roaring.Bitmap, + chunkMode uint32, cr *CountHashWriter, closeCh chan struct{}) ( + newDocNums [][]uint64, + numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, + dictLocs []uint64, fieldsInv []string, fieldsMap map[string]uint16, + err error) { + docValueOffset = uint64(fieldNotUninverted) + + var fieldsSame bool + fieldsSame, fieldsInv = mergeFields(segments) + fieldsMap = mapFields(fieldsInv) + + numDocs = computeNewDocCount(segments, drops) + + if isClosed(closeCh) { + return nil, 0, 0, 0, 0, nil, nil, nil, seg.ErrClosed + } + + if numDocs > 0 { + storedIndexOffset, newDocNums, err = mergeStoredAndRemap(segments, drops, + fieldsMap, fieldsInv, fieldsSame, numDocs, cr, closeCh) + if err != nil { + return nil, 0, 0, 0, 0, nil, nil, nil, err + } + + dictLocs, docValueOffset, err = persistMergedRest(segments, drops, + fieldsInv, fieldsMap, fieldsSame, + newDocNums, numDocs, chunkMode, cr, closeCh) + if err != nil { + return nil, 0, 0, 0, 0, nil, nil, nil, err + } + } else { + dictLocs = make([]uint64, len(fieldsInv)) + } + + fieldsIndexOffset, 
err = persistFields(fieldsInv, cr, dictLocs) + if err != nil { + return nil, 0, 0, 0, 0, nil, nil, nil, err + } + + return newDocNums, numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, dictLocs, fieldsInv, fieldsMap, nil +} + +// mapFields takes the fieldsInv list and returns a map of fieldName +// to fieldID+1 +func mapFields(fields []string) map[string]uint16 { + rv := make(map[string]uint16, len(fields)) + for i, fieldName := range fields { + rv[fieldName] = uint16(i) + 1 + } + return rv +} + +// computeNewDocCount determines how many documents will be in the newly +// merged segment when obsoleted docs are dropped +func computeNewDocCount(segments []*SegmentBase, drops []*roaring.Bitmap) uint64 { + var newDocCount uint64 + for segI, segment := range segments { + newDocCount += segment.numDocs + if drops[segI] != nil { + newDocCount -= drops[segI].GetCardinality() + } + } + return newDocCount +} + +func persistMergedRest(segments []*SegmentBase, dropsIn []*roaring.Bitmap, + fieldsInv []string, fieldsMap map[string]uint16, fieldsSame bool, + newDocNumsIn [][]uint64, newSegDocCount uint64, chunkMode uint32, + w *CountHashWriter, closeCh chan struct{}) ([]uint64, uint64, error) { + + var bufMaxVarintLen64 []byte = make([]byte, binary.MaxVarintLen64) + var bufLoc []uint64 + + var postings *PostingsList + var postItr *PostingsIterator + + rv := make([]uint64, len(fieldsInv)) + fieldDvLocsStart := make([]uint64, len(fieldsInv)) + fieldDvLocsEnd := make([]uint64, len(fieldsInv)) + + // these int coders are initialized with chunk size 1024 + // however this will be reset to the correct chunk size + // while processing each individual field-term section + tfEncoder := newChunkedIntCoder(1024, newSegDocCount-1) + locEncoder := newChunkedIntCoder(1024, newSegDocCount-1) + + var vellumBuf bytes.Buffer + newVellum, err := vellum.New(&vellumBuf, nil) + if err != nil { + return nil, 0, err + } + + newRoaring := roaring.NewBitmap() + + // for each field + for fieldID, fieldName := range fieldsInv { + + // collect FST iterators from all active segments for this field + var newDocNums [][]uint64 + var drops []*roaring.Bitmap + var dicts []*Dictionary + var itrs []vellum.Iterator + + var segmentsInFocus []*SegmentBase + + for segmentI, segment := range segments { + + // check for the closure in meantime + if isClosed(closeCh) { + return nil, 0, seg.ErrClosed + } + + dict, err2 := segment.dictionary(fieldName) + if err2 != nil { + return nil, 0, err2 + } + if dict != nil && dict.fst != nil { + itr, err2 := dict.fst.Iterator(nil, nil) + if err2 != nil && err2 != vellum.ErrIteratorDone { + return nil, 0, err2 + } + if itr != nil { + newDocNums = append(newDocNums, newDocNumsIn[segmentI]) + if dropsIn[segmentI] != nil && !dropsIn[segmentI].IsEmpty() { + drops = append(drops, dropsIn[segmentI]) + } else { + drops = append(drops, nil) + } + dicts = append(dicts, dict) + itrs = append(itrs, itr) + segmentsInFocus = append(segmentsInFocus, segment) + } + } + } + + var prevTerm []byte + + newRoaring.Clear() + + var lastDocNum, lastFreq, lastNorm uint64 + + // determines whether to use "1-hit" encoding optimization + // when a term appears in only 1 doc, with no loc info, + // has freq of 1, and the docNum fits into 31-bits + use1HitEncoding := func(termCardinality uint64) (bool, uint64, uint64) { + if termCardinality == uint64(1) && locEncoder.FinalSize() <= 0 { + docNum := uint64(newRoaring.Minimum()) + if under32Bits(docNum) && docNum == lastDocNum && lastFreq == 1 { + return true, docNum, lastNorm 
+ } + } + return false, 0, 0 + } + + finishTerm := func(term []byte) error { + tfEncoder.Close() + locEncoder.Close() + + postingsOffset, err := writePostings(newRoaring, + tfEncoder, locEncoder, use1HitEncoding, w, bufMaxVarintLen64) + if err != nil { + return err + } + + if postingsOffset > 0 { + err = newVellum.Insert(term, postingsOffset) + if err != nil { + return err + } + } + + newRoaring.Clear() + + tfEncoder.Reset() + locEncoder.Reset() + + lastDocNum = 0 + lastFreq = 0 + lastNorm = 0 + + return nil + } + + enumerator, err := newEnumerator(itrs) + + for err == nil { + term, itrI, postingsOffset := enumerator.Current() + + if !bytes.Equal(prevTerm, term) { + // check for the closure in meantime + if isClosed(closeCh) { + return nil, 0, seg.ErrClosed + } + + // if the term changed, write out the info collected + // for the previous term + err = finishTerm(prevTerm) + if err != nil { + return nil, 0, err + } + } + if !bytes.Equal(prevTerm, term) || prevTerm == nil { + // compute cardinality of field-term in new seg + var newCard uint64 + lowItrIdxs, lowItrVals := enumerator.GetLowIdxsAndValues() + for i, idx := range lowItrIdxs { + pl, err := dicts[idx].postingsListFromOffset(lowItrVals[i], drops[idx], nil) + if err != nil { + return nil, 0, err + } + newCard += pl.Count() + } + // compute correct chunk size with this + chunkSize, err := getChunkSize(chunkMode, newCard, newSegDocCount) + if err != nil { + return nil, 0, err + } + // update encoders chunk + tfEncoder.SetChunkSize(chunkSize, newSegDocCount-1) + locEncoder.SetChunkSize(chunkSize, newSegDocCount-1) + } + + postings, err = dicts[itrI].postingsListFromOffset( + postingsOffset, drops[itrI], postings) + if err != nil { + return nil, 0, err + } + + postItr = postings.iterator(true, true, true, postItr) + + // can no longer optimize by copying, since chunk factor could have changed + lastDocNum, lastFreq, lastNorm, bufLoc, err = mergeTermFreqNormLocs( + fieldsMap, term, postItr, newDocNums[itrI], newRoaring, + tfEncoder, locEncoder, bufLoc) + + if err != nil { + return nil, 0, err + } + + prevTerm = prevTerm[:0] // copy to prevTerm in case Next() reuses term mem + prevTerm = append(prevTerm, term...) 
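+
+		// the enumerator surfaces terms from all segment FST iterators as
+		// one globally sorted stream, so occurrences of the same term in
+		// different segments arrive contiguously and are folded into
+		// newRoaring/tfEncoder/locEncoder before finishTerm flushes them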
+ + err = enumerator.Next() + } + if err != vellum.ErrIteratorDone { + return nil, 0, err + } + + err = finishTerm(prevTerm) + if err != nil { + return nil, 0, err + } + + dictOffset := uint64(w.Count()) + + err = newVellum.Close() + if err != nil { + return nil, 0, err + } + vellumData := vellumBuf.Bytes() + + // write out the length of the vellum data + n := binary.PutUvarint(bufMaxVarintLen64, uint64(len(vellumData))) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return nil, 0, err + } + + // write this vellum to disk + _, err = w.Write(vellumData) + if err != nil { + return nil, 0, err + } + + rv[fieldID] = dictOffset + + // get the field doc value offset (start) + fieldDvLocsStart[fieldID] = uint64(w.Count()) + + // update the field doc values + // NOTE: doc values continue to use legacy chunk mode + chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) + if err != nil { + return nil, 0, err + } + fdvEncoder := newChunkedContentCoder(chunkSize, newSegDocCount-1, w, true) + + fdvReadersAvailable := false + var dvIterClone *docValueReader + for segmentI, segment := range segmentsInFocus { + // check for the closure in meantime + if isClosed(closeCh) { + return nil, 0, seg.ErrClosed + } + + fieldIDPlus1 := uint16(segment.fieldsMap[fieldName]) + if dvIter, exists := segment.fieldDvReaders[fieldIDPlus1-1]; exists && + dvIter != nil { + fdvReadersAvailable = true + dvIterClone = dvIter.cloneInto(dvIterClone) + err = dvIterClone.iterateAllDocValues(segment, func(docNum uint64, terms []byte) error { + if newDocNums[segmentI][docNum] == docDropped { + return nil + } + err := fdvEncoder.Add(newDocNums[segmentI][docNum], terms) + if err != nil { + return err + } + return nil + }) + if err != nil { + return nil, 0, err + } + } + } + + if fdvReadersAvailable { + err = fdvEncoder.Close() + if err != nil { + return nil, 0, err + } + + // persist the doc value details for this field + _, err = fdvEncoder.Write() + if err != nil { + return nil, 0, err + } + + // get the field doc value offset (end) + fieldDvLocsEnd[fieldID] = uint64(w.Count()) + } else { + fieldDvLocsStart[fieldID] = fieldNotUninverted + fieldDvLocsEnd[fieldID] = fieldNotUninverted + } + + // reset vellum buffer and vellum builder + vellumBuf.Reset() + err = newVellum.Reset(&vellumBuf) + if err != nil { + return nil, 0, err + } + } + + fieldDvLocsOffset := uint64(w.Count()) + + buf := bufMaxVarintLen64 + for i := 0; i < len(fieldDvLocsStart); i++ { + n := binary.PutUvarint(buf, fieldDvLocsStart[i]) + _, err := w.Write(buf[:n]) + if err != nil { + return nil, 0, err + } + n = binary.PutUvarint(buf, fieldDvLocsEnd[i]) + _, err = w.Write(buf[:n]) + if err != nil { + return nil, 0, err + } + } + + return rv, fieldDvLocsOffset, nil +} + +func mergeTermFreqNormLocs(fieldsMap map[string]uint16, term []byte, postItr *PostingsIterator, + newDocNums []uint64, newRoaring *roaring.Bitmap, + tfEncoder *chunkedIntCoder, locEncoder *chunkedIntCoder, bufLoc []uint64) ( + lastDocNum uint64, lastFreq uint64, lastNorm uint64, bufLocOut []uint64, err error) { + next, err := postItr.Next() + for next != nil && err == nil { + hitNewDocNum := newDocNums[next.Number()] + if hitNewDocNum == docDropped { + return 0, 0, 0, nil, fmt.Errorf("see hit with dropped docNum") + } + + newRoaring.Add(uint32(hitNewDocNum)) + + nextFreq := next.Frequency() + nextNorm := uint64(math.Float32bits(float32(next.Norm()))) + + locs := next.Locations() + + err = tfEncoder.Add(hitNewDocNum, + encodeFreqHasLocs(nextFreq, len(locs) > 0), nextNorm) + if err != nil { + 
return 0, 0, 0, nil, err + } + + if len(locs) > 0 { + numBytesLocs := 0 + for _, loc := range locs { + ap := loc.ArrayPositions() + numBytesLocs += totalUvarintBytes(uint64(fieldsMap[loc.Field()]-1), + loc.Pos(), loc.Start(), loc.End(), uint64(len(ap)), ap) + } + + err = locEncoder.Add(hitNewDocNum, uint64(numBytesLocs)) + if err != nil { + return 0, 0, 0, nil, err + } + + for _, loc := range locs { + ap := loc.ArrayPositions() + if cap(bufLoc) < 5+len(ap) { + bufLoc = make([]uint64, 0, 5+len(ap)) + } + args := bufLoc[0:5] + args[0] = uint64(fieldsMap[loc.Field()] - 1) + args[1] = loc.Pos() + args[2] = loc.Start() + args[3] = loc.End() + args[4] = uint64(len(ap)) + args = append(args, ap...) + err = locEncoder.Add(hitNewDocNum, args...) + if err != nil { + return 0, 0, 0, nil, err + } + } + } + + lastDocNum = hitNewDocNum + lastFreq = nextFreq + lastNorm = nextNorm + + next, err = postItr.Next() + } + + return lastDocNum, lastFreq, lastNorm, bufLoc, err +} + +func writePostings(postings *roaring.Bitmap, tfEncoder, locEncoder *chunkedIntCoder, + use1HitEncoding func(uint64) (bool, uint64, uint64), + w *CountHashWriter, bufMaxVarintLen64 []byte) ( + offset uint64, err error) { + termCardinality := postings.GetCardinality() + if termCardinality <= 0 { + return 0, nil + } + + if use1HitEncoding != nil { + encodeAs1Hit, docNum1Hit, normBits1Hit := use1HitEncoding(termCardinality) + if encodeAs1Hit { + return FSTValEncode1Hit(docNum1Hit, normBits1Hit), nil + } + } + + var tfOffset uint64 + tfOffset, _, err = tfEncoder.writeAt(w) + if err != nil { + return 0, err + } + + var locOffset uint64 + locOffset, _, err = locEncoder.writeAt(w) + if err != nil { + return 0, err + } + + postingsOffset := uint64(w.Count()) + + n := binary.PutUvarint(bufMaxVarintLen64, tfOffset) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return 0, err + } + + n = binary.PutUvarint(bufMaxVarintLen64, locOffset) + _, err = w.Write(bufMaxVarintLen64[:n]) + if err != nil { + return 0, err + } + + _, err = writeRoaringWithLen(postings, w, bufMaxVarintLen64) + if err != nil { + return 0, err + } + + return postingsOffset, nil +} + +type varintEncoder func(uint64) (int, error) + +func mergeStoredAndRemap(segments []*SegmentBase, drops []*roaring.Bitmap, + fieldsMap map[string]uint16, fieldsInv []string, fieldsSame bool, newSegDocCount uint64, + w *CountHashWriter, closeCh chan struct{}) (uint64, [][]uint64, error) { + var rv [][]uint64 // The remapped or newDocNums for each segment. 
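+	// new doc numbers are assigned contiguously in segment order: each
+	// surviving doc receives the next newDocNum, while deleted docs map
+	// to the docDropped sentinel so later phases can skip them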
+ + var newDocNum uint64 + + var curr int + var data, compressed []byte + var metaBuf bytes.Buffer + varBuf := make([]byte, binary.MaxVarintLen64) + metaEncode := func(val uint64) (int, error) { + wb := binary.PutUvarint(varBuf, val) + return metaBuf.Write(varBuf[:wb]) + } + + vals := make([][][]byte, len(fieldsInv)) + typs := make([][]byte, len(fieldsInv)) + poss := make([][][]uint64, len(fieldsInv)) + + var posBuf []uint64 + + docNumOffsets := make([]uint64, newSegDocCount) + + vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) + defer visitDocumentCtxPool.Put(vdc) + + // for each segment + for segI, segment := range segments { + // check for the closure in meantime + if isClosed(closeCh) { + return 0, nil, seg.ErrClosed + } + + segNewDocNums := make([]uint64, segment.numDocs) + + dropsI := drops[segI] + + // optimize when the field mapping is the same across all + // segments and there are no deletions, via byte-copying + // of stored docs bytes directly to the writer + if fieldsSame && (dropsI == nil || dropsI.GetCardinality() == 0) { + err := segment.copyStoredDocs(newDocNum, docNumOffsets, w) + if err != nil { + return 0, nil, err + } + + for i := uint64(0); i < segment.numDocs; i++ { + segNewDocNums[i] = newDocNum + newDocNum++ + } + rv = append(rv, segNewDocNums) + + continue + } + + // for each doc num + for docNum := uint64(0); docNum < segment.numDocs; docNum++ { + // TODO: roaring's API limits docNums to 32-bits? + if dropsI != nil && dropsI.Contains(uint32(docNum)) { + segNewDocNums[docNum] = docDropped + continue + } + + segNewDocNums[docNum] = newDocNum + + curr = 0 + metaBuf.Reset() + data = data[:0] + + posTemp := posBuf + + // collect all the data + for i := 0; i < len(fieldsInv); i++ { + vals[i] = vals[i][:0] + typs[i] = typs[i][:0] + poss[i] = poss[i][:0] + } + err := segment.visitDocument(vdc, docNum, func(field string, typ byte, value []byte, pos []uint64) bool { + fieldID := int(fieldsMap[field]) - 1 + vals[fieldID] = append(vals[fieldID], value) + typs[fieldID] = append(typs[fieldID], typ) + + // copy array positions to preserve them beyond the scope of this callback + var curPos []uint64 + if len(pos) > 0 { + if cap(posTemp) < len(pos) { + posBuf = make([]uint64, len(pos)*len(fieldsInv)) + posTemp = posBuf + } + curPos = posTemp[0:len(pos)] + copy(curPos, pos) + posTemp = posTemp[len(pos):] + } + poss[fieldID] = append(poss[fieldID], curPos) + + return true + }) + if err != nil { + return 0, nil, err + } + + // _id field special case optimizes ExternalID() lookups + idFieldVal := vals[uint16(0)][0] + _, err = metaEncode(uint64(len(idFieldVal))) + if err != nil { + return 0, nil, err + } + + // now walk the non-"_id" fields in order + for fieldID := 1; fieldID < len(fieldsInv); fieldID++ { + storedFieldValues := vals[fieldID] + + stf := typs[fieldID] + spf := poss[fieldID] + + var err2 error + curr, data, err2 = persistStoredFieldValues(fieldID, + storedFieldValues, stf, spf, curr, metaEncode, data) + if err2 != nil { + return 0, nil, err2 + } + } + + metaBytes := metaBuf.Bytes() + + compressed = snappy.Encode(compressed[:cap(compressed)], data) + + // record where we're about to start writing + docNumOffsets[newDocNum] = uint64(w.Count()) + + // write out the meta len and compressed data len + _, err = writeUvarints(w, + uint64(len(metaBytes)), + uint64(len(idFieldVal)+len(compressed))) + if err != nil { + return 0, nil, err + } + // now write the meta + _, err = w.Write(metaBytes) + if err != nil { + return 0, nil, err + } + // now write the _id field val 
(counted as part of the 'compressed' data) + _, err = w.Write(idFieldVal) + if err != nil { + return 0, nil, err + } + // now write the compressed data + _, err = w.Write(compressed) + if err != nil { + return 0, nil, err + } + + newDocNum++ + } + + rv = append(rv, segNewDocNums) + } + + // return value is the start of the stored index + storedIndexOffset := uint64(w.Count()) + + // now write out the stored doc index + for _, docNumOffset := range docNumOffsets { + err := binary.Write(w, binary.BigEndian, docNumOffset) + if err != nil { + return 0, nil, err + } + } + + return storedIndexOffset, rv, nil +} + +// copyStoredDocs writes out a segment's stored doc info, optimized by +// using a single Write() call for the entire set of bytes. The +// newDocNumOffsets is filled with the new offsets for each doc. +func (s *SegmentBase) copyStoredDocs(newDocNum uint64, newDocNumOffsets []uint64, + w *CountHashWriter) error { + if s.numDocs <= 0 { + return nil + } + + indexOffset0, storedOffset0, _, _, _ := + s.getDocStoredOffsets(0) // the segment's first doc + + indexOffsetN, storedOffsetN, readN, metaLenN, dataLenN := + s.getDocStoredOffsets(s.numDocs - 1) // the segment's last doc + + storedOffset0New := uint64(w.Count()) + + storedBytes := s.mem[storedOffset0 : storedOffsetN+readN+metaLenN+dataLenN] + _, err := w.Write(storedBytes) + if err != nil { + return err + } + + // remap the storedOffset's for the docs into new offsets relative + // to storedOffset0New, filling the given docNumOffsetsOut array + for indexOffset := indexOffset0; indexOffset <= indexOffsetN; indexOffset += 8 { + storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) + storedOffsetNew := storedOffset - storedOffset0 + storedOffset0New + newDocNumOffsets[newDocNum] = storedOffsetNew + newDocNum += 1 + } + + return nil +} + +// mergeFields builds a unified list of fields used across all the +// input segments, and computes whether the fields are the same across +// segments (which depends on fields to be sorted in the same way +// across segments) +func mergeFields(segments []*SegmentBase) (bool, []string) { + fieldsSame := true + + var segment0Fields []string + if len(segments) > 0 { + segment0Fields = segments[0].Fields() + } + + fieldsExist := map[string]struct{}{} + for _, segment := range segments { + fields := segment.Fields() + for fieldi, field := range fields { + fieldsExist[field] = struct{}{} + if len(segment0Fields) != len(fields) || segment0Fields[fieldi] != field { + fieldsSame = false + } + } + } + + rv := make([]string, 0, len(fieldsExist)) + // ensure _id stays first + rv = append(rv, "_id") + for k := range fieldsExist { + if k != "_id" { + rv = append(rv, k) + } + } + + sort.Strings(rv[1:]) // leave _id as first + + return fieldsSame, rv +} + +func isClosed(closeCh chan struct{}) bool { + select { + case <-closeCh: + return true + default: + return false + } +} diff --git a/vendor/github.com/blevesearch/zap/v12/new.go b/vendor/github.com/blevesearch/zap/v12/new.go new file mode 100644 index 0000000000..98158186cb --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/new.go @@ -0,0 +1,860 @@ +// Copyright (c) 2018 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "bytes" + "encoding/binary" + "math" + "sort" + "sync" + + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/bleve/analysis" + "github.com/blevesearch/bleve/document" + "github.com/blevesearch/bleve/index" + "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/couchbase/vellum" + "github.com/golang/snappy" +) + +var NewSegmentBufferNumResultsBump int = 100 +var NewSegmentBufferNumResultsFactor float64 = 1.0 +var NewSegmentBufferAvgBytesPerDocFactor float64 = 1.0 + +// ValidateDocFields can be set by applications to perform additional checks +// on fields in a document being added to a new segment, by default it does +// nothing. +// This API is experimental and may be removed at any time. +var ValidateDocFields = func(field document.Field) error { + return nil +} + +// AnalysisResultsToSegmentBase produces an in-memory zap-encoded +// SegmentBase from analysis results +func (z *ZapPlugin) New(results []*index.AnalysisResult) ( + segment.Segment, uint64, error) { + return z.newWithChunkMode(results, DefaultChunkMode) +} + +func (*ZapPlugin) newWithChunkMode(results []*index.AnalysisResult, + chunkMode uint32) (segment.Segment, uint64, error) { + s := interimPool.Get().(*interim) + + var br bytes.Buffer + if s.lastNumDocs > 0 { + // use previous results to initialize the buf with an estimate + // size, but note that the interim instance comes from a + // global interimPool, so multiple scorch instances indexing + // different docs can lead to low quality estimates + estimateAvgBytesPerDoc := int(float64(s.lastOutSize/s.lastNumDocs) * + NewSegmentBufferNumResultsFactor) + estimateNumResults := int(float64(len(results)+NewSegmentBufferNumResultsBump) * + NewSegmentBufferAvgBytesPerDocFactor) + br.Grow(estimateAvgBytesPerDoc * estimateNumResults) + } + + s.results = results + s.chunkMode = chunkMode + s.w = NewCountHashWriter(&br) + + storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, + err := s.convert() + if err != nil { + return nil, uint64(0), err + } + + sb, err := InitSegmentBase(br.Bytes(), s.w.Sum32(), chunkMode, + s.FieldsMap, s.FieldsInv, uint64(len(results)), + storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets) + + if err == nil && s.reset() == nil { + s.lastNumDocs = len(results) + s.lastOutSize = len(br.Bytes()) + interimPool.Put(s) + } + + return sb, uint64(len(br.Bytes())), err +} + +var interimPool = sync.Pool{New: func() interface{} { return &interim{} }} + +// interim holds temporary working data used while converting from +// analysis results to a zap-encoded segment +type interim struct { + results []*index.AnalysisResult + + chunkMode uint32 + + w *CountHashWriter + + // FieldsMap adds 1 to field id to avoid zero value issues + // name -> field id + 1 + FieldsMap map[string]uint16 + + // FieldsInv is the inverse of FieldsMap + // field id -> name + FieldsInv []string + + // Term dictionaries for each field + // field id -> term -> postings list id + 1 + Dicts []map[string]uint64 + + // Terms for each field, where terms are sorted ascending + // field id -> []term + DictKeys 
[][]string + + // Fields whose IncludeDocValues is true + // field id -> bool + IncludeDocValues []bool + + // postings id -> bitmap of docNums + Postings []*roaring.Bitmap + + // postings id -> freq/norm's, one for each docNum in postings + FreqNorms [][]interimFreqNorm + freqNormsBacking []interimFreqNorm + + // postings id -> locs, one for each freq + Locs [][]interimLoc + locsBacking []interimLoc + + numTermsPerPostingsList []int // key is postings list id + numLocsPerPostingsList []int // key is postings list id + + builder *vellum.Builder + builderBuf bytes.Buffer + + metaBuf bytes.Buffer + + tmp0 []byte + tmp1 []byte + + lastNumDocs int + lastOutSize int +} + +func (s *interim) reset() (err error) { + s.results = nil + s.chunkMode = 0 + s.w = nil + s.FieldsMap = nil + s.FieldsInv = nil + for i := range s.Dicts { + s.Dicts[i] = nil + } + s.Dicts = s.Dicts[:0] + for i := range s.DictKeys { + s.DictKeys[i] = s.DictKeys[i][:0] + } + s.DictKeys = s.DictKeys[:0] + for i := range s.IncludeDocValues { + s.IncludeDocValues[i] = false + } + s.IncludeDocValues = s.IncludeDocValues[:0] + for _, idn := range s.Postings { + idn.Clear() + } + s.Postings = s.Postings[:0] + s.FreqNorms = s.FreqNorms[:0] + for i := range s.freqNormsBacking { + s.freqNormsBacking[i] = interimFreqNorm{} + } + s.freqNormsBacking = s.freqNormsBacking[:0] + s.Locs = s.Locs[:0] + for i := range s.locsBacking { + s.locsBacking[i] = interimLoc{} + } + s.locsBacking = s.locsBacking[:0] + s.numTermsPerPostingsList = s.numTermsPerPostingsList[:0] + s.numLocsPerPostingsList = s.numLocsPerPostingsList[:0] + s.builderBuf.Reset() + if s.builder != nil { + err = s.builder.Reset(&s.builderBuf) + } + s.metaBuf.Reset() + s.tmp0 = s.tmp0[:0] + s.tmp1 = s.tmp1[:0] + s.lastNumDocs = 0 + s.lastOutSize = 0 + + return err +} + +func (s *interim) grabBuf(size int) []byte { + buf := s.tmp0 + if cap(buf) < size { + buf = make([]byte, size) + s.tmp0 = buf + } + return buf[0:size] +} + +type interimStoredField struct { + vals [][]byte + typs []byte + arrayposs [][]uint64 // array positions +} + +type interimFreqNorm struct { + freq uint64 + norm float32 + numLocs int +} + +type interimLoc struct { + fieldID uint16 + pos uint64 + start uint64 + end uint64 + arrayposs []uint64 +} + +func (s *interim) convert() (uint64, uint64, uint64, []uint64, error) { + s.FieldsMap = map[string]uint16{} + + s.getOrDefineField("_id") // _id field is fieldID 0 + + for _, result := range s.results { + for _, field := range result.Document.CompositeFields { + s.getOrDefineField(field.Name()) + } + for _, field := range result.Document.Fields { + s.getOrDefineField(field.Name()) + } + } + + sort.Strings(s.FieldsInv[1:]) // keep _id as first field + + for fieldID, fieldName := range s.FieldsInv { + s.FieldsMap[fieldName] = uint16(fieldID + 1) + } + + if cap(s.IncludeDocValues) >= len(s.FieldsInv) { + s.IncludeDocValues = s.IncludeDocValues[:len(s.FieldsInv)] + } else { + s.IncludeDocValues = make([]bool, len(s.FieldsInv)) + } + + s.prepareDicts() + + for _, dict := range s.DictKeys { + sort.Strings(dict) + } + + s.processDocuments() + + storedIndexOffset, err := s.writeStoredFields() + if err != nil { + return 0, 0, 0, nil, err + } + + var fdvIndexOffset uint64 + var dictOffsets []uint64 + + if len(s.results) > 0 { + fdvIndexOffset, dictOffsets, err = s.writeDicts() + if err != nil { + return 0, 0, 0, nil, err + } + } else { + dictOffsets = make([]uint64, len(s.FieldsInv)) + } + + fieldsIndexOffset, err := persistFields(s.FieldsInv, s.w, dictOffsets) + if err != nil { 
+ return 0, 0, 0, nil, err + } + + return storedIndexOffset, fieldsIndexOffset, fdvIndexOffset, dictOffsets, nil +} + +func (s *interim) getOrDefineField(fieldName string) int { + fieldIDPlus1, exists := s.FieldsMap[fieldName] + if !exists { + fieldIDPlus1 = uint16(len(s.FieldsInv) + 1) + s.FieldsMap[fieldName] = fieldIDPlus1 + s.FieldsInv = append(s.FieldsInv, fieldName) + + s.Dicts = append(s.Dicts, make(map[string]uint64)) + + n := len(s.DictKeys) + if n < cap(s.DictKeys) { + s.DictKeys = s.DictKeys[:n+1] + s.DictKeys[n] = s.DictKeys[n][:0] + } else { + s.DictKeys = append(s.DictKeys, []string(nil)) + } + } + + return int(fieldIDPlus1 - 1) +} + +// fill Dicts and DictKeys from analysis results +func (s *interim) prepareDicts() { + var pidNext int + + var totTFs int + var totLocs int + + visitField := func(fieldID uint16, tfs analysis.TokenFrequencies) { + dict := s.Dicts[fieldID] + dictKeys := s.DictKeys[fieldID] + + for term, tf := range tfs { + pidPlus1, exists := dict[term] + if !exists { + pidNext++ + pidPlus1 = uint64(pidNext) + + dict[term] = pidPlus1 + dictKeys = append(dictKeys, term) + + s.numTermsPerPostingsList = append(s.numTermsPerPostingsList, 0) + s.numLocsPerPostingsList = append(s.numLocsPerPostingsList, 0) + } + + pid := pidPlus1 - 1 + + s.numTermsPerPostingsList[pid] += 1 + s.numLocsPerPostingsList[pid] += len(tf.Locations) + + totLocs += len(tf.Locations) + } + + totTFs += len(tfs) + + s.DictKeys[fieldID] = dictKeys + } + + for _, result := range s.results { + // walk each composite field + for _, field := range result.Document.CompositeFields { + fieldID := uint16(s.getOrDefineField(field.Name())) + _, tf := field.Analyze() + visitField(fieldID, tf) + } + + // walk each field + for i, field := range result.Document.Fields { + fieldID := uint16(s.getOrDefineField(field.Name())) + tf := result.Analyzed[i] + visitField(fieldID, tf) + } + } + + numPostingsLists := pidNext + + if cap(s.Postings) >= numPostingsLists { + s.Postings = s.Postings[:numPostingsLists] + } else { + postings := make([]*roaring.Bitmap, numPostingsLists) + copy(postings, s.Postings[:cap(s.Postings)]) + for i := 0; i < numPostingsLists; i++ { + if postings[i] == nil { + postings[i] = roaring.New() + } + } + s.Postings = postings + } + + if cap(s.FreqNorms) >= numPostingsLists { + s.FreqNorms = s.FreqNorms[:numPostingsLists] + } else { + s.FreqNorms = make([][]interimFreqNorm, numPostingsLists) + } + + if cap(s.freqNormsBacking) >= totTFs { + s.freqNormsBacking = s.freqNormsBacking[:totTFs] + } else { + s.freqNormsBacking = make([]interimFreqNorm, totTFs) + } + + freqNormsBacking := s.freqNormsBacking + for pid, numTerms := range s.numTermsPerPostingsList { + s.FreqNorms[pid] = freqNormsBacking[0:0] + freqNormsBacking = freqNormsBacking[numTerms:] + } + + if cap(s.Locs) >= numPostingsLists { + s.Locs = s.Locs[:numPostingsLists] + } else { + s.Locs = make([][]interimLoc, numPostingsLists) + } + + if cap(s.locsBacking) >= totLocs { + s.locsBacking = s.locsBacking[:totLocs] + } else { + s.locsBacking = make([]interimLoc, totLocs) + } + + locsBacking := s.locsBacking + for pid, numLocs := range s.numLocsPerPostingsList { + s.Locs[pid] = locsBacking[0:0] + locsBacking = locsBacking[numLocs:] + } +} + +func (s *interim) processDocuments() { + numFields := len(s.FieldsInv) + reuseFieldLens := make([]int, numFields) + reuseFieldTFs := make([]analysis.TokenFrequencies, numFields) + + for docNum, result := range s.results { + for i := 0; i < numFields; i++ { // clear these for reuse + reuseFieldLens[i] = 0 + 
reuseFieldTFs[i] = nil + } + + s.processDocument(uint64(docNum), result, + reuseFieldLens, reuseFieldTFs) + } +} + +func (s *interim) processDocument(docNum uint64, + result *index.AnalysisResult, + fieldLens []int, fieldTFs []analysis.TokenFrequencies) { + visitField := func(fieldID uint16, fieldName string, + ln int, tf analysis.TokenFrequencies) { + fieldLens[fieldID] += ln + + existingFreqs := fieldTFs[fieldID] + if existingFreqs != nil { + existingFreqs.MergeAll(fieldName, tf) + } else { + fieldTFs[fieldID] = tf + } + } + + // walk each composite field + for _, field := range result.Document.CompositeFields { + fieldID := uint16(s.getOrDefineField(field.Name())) + ln, tf := field.Analyze() + visitField(fieldID, field.Name(), ln, tf) + } + + // walk each field + for i, field := range result.Document.Fields { + fieldID := uint16(s.getOrDefineField(field.Name())) + ln := result.Length[i] + tf := result.Analyzed[i] + visitField(fieldID, field.Name(), ln, tf) + } + + // now that it's been rolled up into fieldTFs, walk that + for fieldID, tfs := range fieldTFs { + dict := s.Dicts[fieldID] + norm := float32(1.0 / math.Sqrt(float64(fieldLens[fieldID]))) + + for term, tf := range tfs { + pid := dict[term] - 1 + bs := s.Postings[pid] + bs.Add(uint32(docNum)) + + s.FreqNorms[pid] = append(s.FreqNorms[pid], + interimFreqNorm{ + freq: uint64(tf.Frequency()), + norm: norm, + numLocs: len(tf.Locations), + }) + + if len(tf.Locations) > 0 { + locs := s.Locs[pid] + + for _, loc := range tf.Locations { + var locf = uint16(fieldID) + if loc.Field != "" { + locf = uint16(s.getOrDefineField(loc.Field)) + } + var arrayposs []uint64 + if len(loc.ArrayPositions) > 0 { + arrayposs = loc.ArrayPositions + } + locs = append(locs, interimLoc{ + fieldID: locf, + pos: uint64(loc.Position), + start: uint64(loc.Start), + end: uint64(loc.End), + arrayposs: arrayposs, + }) + } + + s.Locs[pid] = locs + } + } + } +} + +func (s *interim) writeStoredFields() ( + storedIndexOffset uint64, err error) { + varBuf := make([]byte, binary.MaxVarintLen64) + metaEncode := func(val uint64) (int, error) { + wb := binary.PutUvarint(varBuf, val) + return s.metaBuf.Write(varBuf[:wb]) + } + + data, compressed := s.tmp0[:0], s.tmp1[:0] + defer func() { s.tmp0, s.tmp1 = data, compressed }() + + // keyed by docNum + docStoredOffsets := make([]uint64, len(s.results)) + + // keyed by fieldID, for the current doc in the loop + docStoredFields := map[uint16]interimStoredField{} + + for docNum, result := range s.results { + for fieldID := range docStoredFields { // reset for next doc + delete(docStoredFields, fieldID) + } + + for _, field := range result.Document.Fields { + fieldID := uint16(s.getOrDefineField(field.Name())) + + opts := field.Options() + + if opts.IsStored() { + isf := docStoredFields[fieldID] + isf.vals = append(isf.vals, field.Value()) + isf.typs = append(isf.typs, encodeFieldType(field)) + isf.arrayposs = append(isf.arrayposs, field.ArrayPositions()) + docStoredFields[fieldID] = isf + } + + if opts.IncludeDocValues() { + s.IncludeDocValues[fieldID] = true + } + + err := ValidateDocFields(field) + if err != nil { + return 0, err + } + } + + var curr int + + s.metaBuf.Reset() + data = data[:0] + + // _id field special case optimizes ExternalID() lookups + idFieldVal := docStoredFields[uint16(0)].vals[0] + _, err = metaEncode(uint64(len(idFieldVal))) + if err != nil { + return 0, err + } + + // handle non-"_id" fields + for fieldID := 1; fieldID < len(s.FieldsInv); fieldID++ { + isf, exists := docStoredFields[uint16(fieldID)] + 
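+			// fields with no stored values for this doc are skipped, so
+			// only the fields actually present contribute meta entries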
if exists { + curr, data, err = persistStoredFieldValues( + fieldID, isf.vals, isf.typs, isf.arrayposs, + curr, metaEncode, data) + if err != nil { + return 0, err + } + } + } + + metaBytes := s.metaBuf.Bytes() + + compressed = snappy.Encode(compressed[:cap(compressed)], data) + + docStoredOffsets[docNum] = uint64(s.w.Count()) + + _, err := writeUvarints(s.w, + uint64(len(metaBytes)), + uint64(len(idFieldVal)+len(compressed))) + if err != nil { + return 0, err + } + + _, err = s.w.Write(metaBytes) + if err != nil { + return 0, err + } + + _, err = s.w.Write(idFieldVal) + if err != nil { + return 0, err + } + + _, err = s.w.Write(compressed) + if err != nil { + return 0, err + } + } + + storedIndexOffset = uint64(s.w.Count()) + + for _, docStoredOffset := range docStoredOffsets { + err = binary.Write(s.w, binary.BigEndian, docStoredOffset) + if err != nil { + return 0, err + } + } + + return storedIndexOffset, nil +} + +func (s *interim) writeDicts() (fdvIndexOffset uint64, dictOffsets []uint64, err error) { + dictOffsets = make([]uint64, len(s.FieldsInv)) + + fdvOffsetsStart := make([]uint64, len(s.FieldsInv)) + fdvOffsetsEnd := make([]uint64, len(s.FieldsInv)) + + buf := s.grabBuf(binary.MaxVarintLen64) + + // these int coders are initialized with chunk size 1024 + // however this will be reset to the correct chunk size + // while processing each individual field-term section + tfEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) + locEncoder := newChunkedIntCoder(1024, uint64(len(s.results)-1)) + + var docTermMap [][]byte + + if s.builder == nil { + s.builder, err = vellum.New(&s.builderBuf, nil) + if err != nil { + return 0, nil, err + } + } + + for fieldID, terms := range s.DictKeys { + if cap(docTermMap) < len(s.results) { + docTermMap = make([][]byte, len(s.results)) + } else { + docTermMap = docTermMap[0:len(s.results)] + for docNum := range docTermMap { // reset the docTermMap + docTermMap[docNum] = docTermMap[docNum][:0] + } + } + + dict := s.Dicts[fieldID] + + for _, term := range terms { // terms are already sorted + pid := dict[term] - 1 + + postingsBS := s.Postings[pid] + + freqNorms := s.FreqNorms[pid] + freqNormOffset := 0 + + locs := s.Locs[pid] + locOffset := 0 + + chunkSize, err := getChunkSize(s.chunkMode, postingsBS.GetCardinality(), uint64(len(s.results))) + if err != nil { + return 0, nil, err + } + tfEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) + locEncoder.SetChunkSize(chunkSize, uint64(len(s.results)-1)) + + postingsItr := postingsBS.Iterator() + for postingsItr.HasNext() { + docNum := uint64(postingsItr.Next()) + + freqNorm := freqNorms[freqNormOffset] + + err = tfEncoder.Add(docNum, + encodeFreqHasLocs(freqNorm.freq, freqNorm.numLocs > 0), + uint64(math.Float32bits(freqNorm.norm))) + if err != nil { + return 0, nil, err + } + + if freqNorm.numLocs > 0 { + numBytesLocs := 0 + for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { + numBytesLocs += totalUvarintBytes( + uint64(loc.fieldID), loc.pos, loc.start, loc.end, + uint64(len(loc.arrayposs)), loc.arrayposs) + } + + err = locEncoder.Add(docNum, uint64(numBytesLocs)) + if err != nil { + return 0, nil, err + } + + for _, loc := range locs[locOffset : locOffset+freqNorm.numLocs] { + err = locEncoder.Add(docNum, + uint64(loc.fieldID), loc.pos, loc.start, loc.end, + uint64(len(loc.arrayposs))) + if err != nil { + return 0, nil, err + } + + err = locEncoder.Add(docNum, loc.arrayposs...) 
+ if err != nil { + return 0, nil, err + } + } + + locOffset += freqNorm.numLocs + } + + freqNormOffset++ + + docTermMap[docNum] = append( + append(docTermMap[docNum], term...), + termSeparator) + } + + tfEncoder.Close() + locEncoder.Close() + + postingsOffset, err := + writePostings(postingsBS, tfEncoder, locEncoder, nil, s.w, buf) + if err != nil { + return 0, nil, err + } + + if postingsOffset > uint64(0) { + err = s.builder.Insert([]byte(term), postingsOffset) + if err != nil { + return 0, nil, err + } + } + + tfEncoder.Reset() + locEncoder.Reset() + } + + err = s.builder.Close() + if err != nil { + return 0, nil, err + } + + // record where this dictionary starts + dictOffsets[fieldID] = uint64(s.w.Count()) + + vellumData := s.builderBuf.Bytes() + + // write out the length of the vellum data + n := binary.PutUvarint(buf, uint64(len(vellumData))) + _, err = s.w.Write(buf[:n]) + if err != nil { + return 0, nil, err + } + + // write this vellum to disk + _, err = s.w.Write(vellumData) + if err != nil { + return 0, nil, err + } + + // reset vellum for reuse + s.builderBuf.Reset() + + err = s.builder.Reset(&s.builderBuf) + if err != nil { + return 0, nil, err + } + + // write the field doc values + // NOTE: doc values continue to use legacy chunk mode + chunkSize, err := getChunkSize(LegacyChunkMode, 0, 0) + if err != nil { + return 0, nil, err + } + fdvEncoder := newChunkedContentCoder(chunkSize, uint64(len(s.results)-1), s.w, false) + if s.IncludeDocValues[fieldID] { + for docNum, docTerms := range docTermMap { + if len(docTerms) > 0 { + err = fdvEncoder.Add(uint64(docNum), docTerms) + if err != nil { + return 0, nil, err + } + } + } + err = fdvEncoder.Close() + if err != nil { + return 0, nil, err + } + + fdvOffsetsStart[fieldID] = uint64(s.w.Count()) + + _, err = fdvEncoder.Write() + if err != nil { + return 0, nil, err + } + + fdvOffsetsEnd[fieldID] = uint64(s.w.Count()) + + fdvEncoder.Reset() + } else { + fdvOffsetsStart[fieldID] = fieldNotUninverted + fdvOffsetsEnd[fieldID] = fieldNotUninverted + } + } + + fdvIndexOffset = uint64(s.w.Count()) + + for i := 0; i < len(fdvOffsetsStart); i++ { + n := binary.PutUvarint(buf, fdvOffsetsStart[i]) + _, err := s.w.Write(buf[:n]) + if err != nil { + return 0, nil, err + } + n = binary.PutUvarint(buf, fdvOffsetsEnd[i]) + _, err = s.w.Write(buf[:n]) + if err != nil { + return 0, nil, err + } + } + + return fdvIndexOffset, dictOffsets, nil +} + +func encodeFieldType(f document.Field) byte { + fieldType := byte('x') + switch f.(type) { + case *document.TextField: + fieldType = 't' + case *document.NumericField: + fieldType = 'n' + case *document.DateTimeField: + fieldType = 'd' + case *document.BooleanField: + fieldType = 'b' + case *document.GeoPointField: + fieldType = 'g' + case *document.CompositeField: + fieldType = 'c' + } + return fieldType +} + +// returns the total # of bytes needed to encode the given uint64's +// into binary.PutUVarint() encoding +func totalUvarintBytes(a, b, c, d, e uint64, more []uint64) (n int) { + n = numUvarintBytes(a) + n += numUvarintBytes(b) + n += numUvarintBytes(c) + n += numUvarintBytes(d) + n += numUvarintBytes(e) + for _, v := range more { + n += numUvarintBytes(v) + } + return n +} + +// returns # of bytes needed to encode x in binary.PutUvarint() encoding +func numUvarintBytes(x uint64) (n int) { + for x >= 0x80 { + x >>= 7 + n++ + } + return n + 1 +} diff --git a/vendor/github.com/blevesearch/zap/v12/plugin.go b/vendor/github.com/blevesearch/zap/v12/plugin.go new file mode 100644 index 
0000000000..38a0638d4d --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/plugin.go @@ -0,0 +1,37 @@ +// Copyright (c) 2020 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "github.com/blevesearch/bleve/index/scorch/segment" +) + +// ZapPlugin implements the Plugin interface of +// the blevesearch/bleve/index/scorch/segment pkg +type ZapPlugin struct{} + +func (*ZapPlugin) Type() string { + return Type +} + +func (*ZapPlugin) Version() uint32 { + return Version +} + +// Plugin returns an instance segment.Plugin for use +// by the Scorch indexing scheme +func Plugin() segment.Plugin { + return &ZapPlugin{} +} diff --git a/vendor/github.com/blevesearch/zap/v12/posting.go b/vendor/github.com/blevesearch/zap/v12/posting.go new file mode 100644 index 0000000000..3a6ee5483a --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/posting.go @@ -0,0 +1,798 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/blevesearch/bleve/size" +) + +var reflectStaticSizePostingsList int +var reflectStaticSizePostingsIterator int +var reflectStaticSizePosting int +var reflectStaticSizeLocation int + +func init() { + var pl PostingsList + reflectStaticSizePostingsList = int(reflect.TypeOf(pl).Size()) + var pi PostingsIterator + reflectStaticSizePostingsIterator = int(reflect.TypeOf(pi).Size()) + var p Posting + reflectStaticSizePosting = int(reflect.TypeOf(p).Size()) + var l Location + reflectStaticSizeLocation = int(reflect.TypeOf(l).Size()) +} + +// FST or vellum value (uint64) encoding is determined by the top two +// highest-order or most significant bits... +// +// encoding : MSB +// name : 63 62 61...to...bit #0 (LSB) +// ----------+---+---+--------------------------------------------------- +// general : 0 | 0 | 62-bits of postingsOffset. +// ~ : 0 | 1 | reserved for future. +// 1-hit : 1 | 0 | 31-bits of positive float31 norm | 31-bits docNum. +// ~ : 1 | 1 | reserved for future. +// +// Encoding "general" is able to handle all cases, where the +// postingsOffset points to more information about the postings for +// the term. +// +// Encoding "1-hit" is used to optimize a commonly seen case when a +// term has only a single hit. For example, a term in the _id field +// will have only 1 hit. 
The "1-hit" encoding is used for a term +// in a field when... +// +// - term vector info is disabled for that field; +// - and, the term appears in only a single doc for that field; +// - and, the term's freq is exactly 1 in that single doc for that field; +// - and, the docNum must fit into 31-bits; +// +// Otherwise, the "general" encoding is used instead. +// +// In the "1-hit" encoding, the field in that single doc may have +// other terms, which is supported in the "1-hit" encoding by the +// positive float31 norm. + +const FSTValEncodingMask = uint64(0xc000000000000000) +const FSTValEncodingGeneral = uint64(0x0000000000000000) +const FSTValEncoding1Hit = uint64(0x8000000000000000) + +func FSTValEncode1Hit(docNum uint64, normBits uint64) uint64 { + return FSTValEncoding1Hit | ((mask31Bits & normBits) << 31) | (mask31Bits & docNum) +} + +func FSTValDecode1Hit(v uint64) (docNum uint64, normBits uint64) { + return (mask31Bits & v), (mask31Bits & (v >> 31)) +} + +const mask31Bits = uint64(0x000000007fffffff) + +func under32Bits(x uint64) bool { + return x <= mask31Bits +} + +const DocNum1HitFinished = math.MaxUint64 + +var NormBits1Hit = uint64(math.Float32bits(float32(1))) + +// PostingsList is an in-memory representation of a postings list +type PostingsList struct { + sb *SegmentBase + postingsOffset uint64 + freqOffset uint64 + locOffset uint64 + postings *roaring.Bitmap + except *roaring.Bitmap + + // when normBits1Hit != 0, then this postings list came from a + // 1-hit encoding, and only the docNum1Hit & normBits1Hit apply + docNum1Hit uint64 + normBits1Hit uint64 +} + +// represents an immutable, empty postings list +var emptyPostingsList = &PostingsList{} + +func (p *PostingsList) Size() int { + sizeInBytes := reflectStaticSizePostingsList + size.SizeOfPtr + + if p.except != nil { + sizeInBytes += int(p.except.GetSizeInBytes()) + } + + return sizeInBytes +} + +func (p *PostingsList) OrInto(receiver *roaring.Bitmap) { + if p.normBits1Hit != 0 { + receiver.Add(uint32(p.docNum1Hit)) + return + } + + if p.postings != nil { + receiver.Or(p.postings) + } +} + +// Iterator returns an iterator for this postings list +func (p *PostingsList) Iterator(includeFreq, includeNorm, includeLocs bool, + prealloc segment.PostingsIterator) segment.PostingsIterator { + if p.normBits1Hit == 0 && p.postings == nil { + return emptyPostingsIterator + } + + var preallocPI *PostingsIterator + pi, ok := prealloc.(*PostingsIterator) + if ok && pi != nil { + preallocPI = pi + } + if preallocPI == emptyPostingsIterator { + preallocPI = nil + } + + return p.iterator(includeFreq, includeNorm, includeLocs, preallocPI) +} + +func (p *PostingsList) iterator(includeFreq, includeNorm, includeLocs bool, + rv *PostingsIterator) *PostingsIterator { + if rv == nil { + rv = &PostingsIterator{} + } else { + freqNormReader := rv.freqNormReader + if freqNormReader != nil { + freqNormReader.reset() + } + + locReader := rv.locReader + if locReader != nil { + locReader.reset() + } + + nextLocs := rv.nextLocs[:0] + nextSegmentLocs := rv.nextSegmentLocs[:0] + + buf := rv.buf + + *rv = PostingsIterator{} // clear the struct + + rv.freqNormReader = freqNormReader + rv.locReader = locReader + + rv.nextLocs = nextLocs + rv.nextSegmentLocs = nextSegmentLocs + + rv.buf = buf + } + + rv.postings = p + rv.includeFreqNorm = includeFreq || includeNorm || includeLocs + rv.includeLocs = includeLocs + + if p.normBits1Hit != 0 { + // "1-hit" encoding + rv.docNum1Hit = p.docNum1Hit + rv.normBits1Hit = p.normBits1Hit + + if p.except != nil && 
p.except.Contains(uint32(rv.docNum1Hit)) { + rv.docNum1Hit = DocNum1HitFinished + } + + return rv + } + + // "general" encoding, check if empty + if p.postings == nil { + return rv + } + + // initialize freq chunk reader + if rv.includeFreqNorm { + rv.freqNormReader = newChunkedIntDecoder(p.sb.mem, p.freqOffset) + } + + // initialize the loc chunk reader + if rv.includeLocs { + rv.locReader = newChunkedIntDecoder(p.sb.mem, p.locOffset) + } + + rv.all = p.postings.Iterator() + if p.except != nil { + rv.ActualBM = roaring.AndNot(p.postings, p.except) + rv.Actual = rv.ActualBM.Iterator() + } else { + rv.ActualBM = p.postings + rv.Actual = rv.all // Optimize to use same iterator for all & Actual. + } + + return rv +} + +// Count returns the number of items on this postings list +func (p *PostingsList) Count() uint64 { + var n, e uint64 + if p.normBits1Hit != 0 { + n = 1 + if p.except != nil && p.except.Contains(uint32(p.docNum1Hit)) { + e = 1 + } + } else if p.postings != nil { + n = p.postings.GetCardinality() + if p.except != nil { + e = p.postings.AndCardinality(p.except) + } + } + return n - e +} + +func (rv *PostingsList) read(postingsOffset uint64, d *Dictionary) error { + rv.postingsOffset = postingsOffset + + // handle "1-hit" encoding special case + if rv.postingsOffset&FSTValEncodingMask == FSTValEncoding1Hit { + return rv.init1Hit(postingsOffset) + } + + // read the location of the freq/norm details + var n uint64 + var read int + + rv.freqOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+binary.MaxVarintLen64]) + n += uint64(read) + + rv.locOffset, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) + n += uint64(read) + + var postingsLen uint64 + postingsLen, read = binary.Uvarint(d.sb.mem[postingsOffset+n : postingsOffset+n+binary.MaxVarintLen64]) + n += uint64(read) + + roaringBytes := d.sb.mem[postingsOffset+n : postingsOffset+n+postingsLen] + + if rv.postings == nil { + rv.postings = roaring.NewBitmap() + } + _, err := rv.postings.FromBuffer(roaringBytes) + if err != nil { + return fmt.Errorf("error loading roaring bitmap: %v", err) + } + + return nil +} + +func (rv *PostingsList) init1Hit(fstVal uint64) error { + docNum, normBits := FSTValDecode1Hit(fstVal) + + rv.docNum1Hit = docNum + rv.normBits1Hit = normBits + + return nil +} + +// PostingsIterator provides a way to iterate through the postings list +type PostingsIterator struct { + postings *PostingsList + all roaring.IntPeekable + Actual roaring.IntPeekable + ActualBM *roaring.Bitmap + + currChunk uint32 + freqNormReader *chunkedIntDecoder + locReader *chunkedIntDecoder + + next Posting // reused across Next() calls + nextLocs []Location // reused across Next() calls + nextSegmentLocs []segment.Location // reused across Next() calls + + docNum1Hit uint64 + normBits1Hit uint64 + + buf []byte + + includeFreqNorm bool + includeLocs bool +} + +var emptyPostingsIterator = &PostingsIterator{} + +func (i *PostingsIterator) Size() int { + sizeInBytes := reflectStaticSizePostingsIterator + size.SizeOfPtr + + i.next.Size() + // account for freqNormReader, locReader if we start using this. 
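+	// (both chunked decoders are created lazily in iterator(), so their
+	// buffers are not yet included in this estimate)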
+	for _, entry := range i.nextLocs {
+		sizeInBytes += entry.Size()
+	}
+
+	return sizeInBytes
+}
+
+func (i *PostingsIterator) loadChunk(chunk int) error {
+	if i.includeFreqNorm {
+		err := i.freqNormReader.loadChunk(chunk)
+		if err != nil {
+			return err
+		}
+	}
+
+	if i.includeLocs {
+		err := i.locReader.loadChunk(chunk)
+		if err != nil {
+			return err
+		}
+	}
+
+	i.currChunk = uint32(chunk)
+	return nil
+}
+
+func (i *PostingsIterator) readFreqNormHasLocs() (uint64, uint64, bool, error) {
+	if i.normBits1Hit != 0 {
+		return 1, i.normBits1Hit, false, nil
+	}
+
+	freqHasLocs, err := i.freqNormReader.readUvarint()
+	if err != nil {
+		return 0, 0, false, fmt.Errorf("error reading frequency: %v", err)
+	}
+
+	freq, hasLocs := decodeFreqHasLocs(freqHasLocs)
+
+	normBits, err := i.freqNormReader.readUvarint()
+	if err != nil {
+		return 0, 0, false, fmt.Errorf("error reading norm: %v", err)
+	}
+
+	return freq, normBits, hasLocs, nil
+}
+
+func (i *PostingsIterator) skipFreqNormReadHasLocs() (bool, error) {
+	if i.normBits1Hit != 0 {
+		return false, nil
+	}
+
+	freqHasLocs, err := i.freqNormReader.readUvarint()
+	if err != nil {
+		return false, fmt.Errorf("error reading freqHasLocs: %v", err)
+	}
+
+	i.freqNormReader.SkipUvarint() // Skip normBits.
+
+	return freqHasLocs&0x01 != 0, nil // See decodeFreqHasLocs() / hasLocs.
+}
+
+func encodeFreqHasLocs(freq uint64, hasLocs bool) uint64 {
+	rv := freq << 1
+	if hasLocs {
+		rv = rv | 0x01 // 0'th LSB encodes whether there are locations
+	}
+	return rv
+}
+
+func decodeFreqHasLocs(freqHasLocs uint64) (uint64, bool) {
+	freq := freqHasLocs >> 1
+	hasLocs := freqHasLocs&0x01 != 0
+	return freq, hasLocs
+}
+
+// readLocation processes all the integers on the stream representing a single
+// location.
+func (i *PostingsIterator) readLocation(l *Location) error {
+	// read off field
+	fieldID, err := i.locReader.readUvarint()
+	if err != nil {
+		return fmt.Errorf("error reading location field: %v", err)
+	}
+	// read off pos
+	pos, err := i.locReader.readUvarint()
+	if err != nil {
+		return fmt.Errorf("error reading location pos: %v", err)
+	}
+	// read off start
+	start, err := i.locReader.readUvarint()
+	if err != nil {
+		return fmt.Errorf("error reading location start: %v", err)
+	}
+	// read off end
+	end, err := i.locReader.readUvarint()
+	if err != nil {
+		return fmt.Errorf("error reading location end: %v", err)
+	}
+	// read off num array pos
+	numArrayPos, err := i.locReader.readUvarint()
+	if err != nil {
+		return fmt.Errorf("error reading location num array pos: %v", err)
+	}
+
+	l.field = i.postings.sb.fieldsInv[fieldID]
+	l.pos = pos
+	l.start = start
+	l.end = end
+
+	if cap(l.ap) < int(numArrayPos) {
+		l.ap = make([]uint64, int(numArrayPos))
+	} else {
+		l.ap = l.ap[:int(numArrayPos)]
+	}
+
+	// read off array positions
+	for k := 0; k < int(numArrayPos); k++ {
+		ap, err := i.locReader.readUvarint()
+		if err != nil {
+			return fmt.Errorf("error reading array position: %v", err)
+		}
+
+		l.ap[k] = ap
+	}
+
+	return nil
+}
+
+// Next returns the next posting on the postings list, or nil at the end
+func (i *PostingsIterator) Next() (segment.Posting, error) {
+	return i.nextAtOrAfter(0)
+}
+
+// Advance returns the posting at the specified docNum, or if it is not
+// present, the next posting; nil is returned once the end is reached
+func (i *PostingsIterator) Advance(docNum uint64) (segment.Posting, error) {
+	return i.nextAtOrAfter(docNum)
+}
+
+// nextAtOrAfter returns the next posting at or after atOrAfter, or nil
+// once the end is reached
+func (i *PostingsIterator)
nextAtOrAfter(atOrAfter uint64) (segment.Posting, error) { + docNum, exists, err := i.nextDocNumAtOrAfter(atOrAfter) + if err != nil || !exists { + return nil, err + } + + i.next = Posting{} // clear the struct + rv := &i.next + rv.docNum = docNum + + if !i.includeFreqNorm { + return rv, nil + } + + var normBits uint64 + var hasLocs bool + + rv.freq, normBits, hasLocs, err = i.readFreqNormHasLocs() + if err != nil { + return nil, err + } + + rv.norm = math.Float32frombits(uint32(normBits)) + + if i.includeLocs && hasLocs { + // prepare locations into reused slices, where we assume + // rv.freq >= "number of locs", since in a composite field, + // some component fields might have their IncludeTermVector + // flags disabled while other component fields are enabled + if cap(i.nextLocs) >= int(rv.freq) { + i.nextLocs = i.nextLocs[0:rv.freq] + } else { + i.nextLocs = make([]Location, rv.freq, rv.freq*2) + } + if cap(i.nextSegmentLocs) < int(rv.freq) { + i.nextSegmentLocs = make([]segment.Location, rv.freq, rv.freq*2) + } + rv.locs = i.nextSegmentLocs[:0] + + numLocsBytes, err := i.locReader.readUvarint() + if err != nil { + return nil, fmt.Errorf("error reading location numLocsBytes: %v", err) + } + + j := 0 + startBytesRemaining := i.locReader.Len() // # bytes remaining in the locReader + for startBytesRemaining-i.locReader.Len() < int(numLocsBytes) { + err := i.readLocation(&i.nextLocs[j]) + if err != nil { + return nil, err + } + rv.locs = append(rv.locs, &i.nextLocs[j]) + j++ + } + } + + return rv, nil +} + +// nextDocNum returns the next docNum on the postings list, and also +// sets up the currChunk / loc related fields of the iterator. +func (i *PostingsIterator) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool, error) { + if i.normBits1Hit != 0 { + if i.docNum1Hit == DocNum1HitFinished { + return 0, false, nil + } + if i.docNum1Hit < atOrAfter { + // advanced past our 1-hit + i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum + return 0, false, nil + } + docNum := i.docNum1Hit + i.docNum1Hit = DocNum1HitFinished // consume our 1-hit docNum + return docNum, true, nil + } + + if i.Actual == nil || !i.Actual.HasNext() { + return 0, false, nil + } + + if i.postings == nil || i.postings.postings == i.ActualBM { + return i.nextDocNumAtOrAfterClean(atOrAfter) + } + + i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) + + if !i.Actual.HasNext() { + // couldn't find anything + return 0, false, nil + } + + n := i.Actual.Next() + allN := i.all.Next() + + chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) + if err != nil { + return 0, false, err + } + nChunk := n / uint32(chunkSize) + + // when allN becomes >= to here, then allN is in the same chunk as nChunk. 
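+	// (illustration: with chunkSize 1024 and n == 3000, nChunk == 2 and
+	// allNReachesNChunk == 2048, so any allN >= 2048 is in n's chunk)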
+ allNReachesNChunk := nChunk * uint32(chunkSize) + + // n is the next actual hit (excluding some postings), and + // allN is the next hit in the full postings, and + // if they don't match, move 'all' forwards until they do + for allN != n { + // we've reached same chunk, so move the freq/norm/loc decoders forward + if i.includeFreqNorm && allN >= allNReachesNChunk { + err := i.currChunkNext(nChunk) + if err != nil { + return 0, false, err + } + } + + allN = i.all.Next() + } + + if i.includeFreqNorm && (i.currChunk != nChunk || i.freqNormReader.isNil()) { + err := i.loadChunk(int(nChunk)) + if err != nil { + return 0, false, fmt.Errorf("error loading chunk: %v", err) + } + } + + return uint64(n), true, nil +} + +// optimization when the postings list is "clean" (e.g., no updates & +// no deletions) where the all bitmap is the same as the actual bitmap +func (i *PostingsIterator) nextDocNumAtOrAfterClean( + atOrAfter uint64) (uint64, bool, error) { + + if !i.includeFreqNorm { + i.Actual.AdvanceIfNeeded(uint32(atOrAfter)) + + if !i.Actual.HasNext() { + return 0, false, nil // couldn't find anything + } + + return uint64(i.Actual.Next()), true, nil + } + + chunkSize, err := getChunkSize(i.postings.sb.chunkMode, i.postings.postings.GetCardinality(), i.postings.sb.numDocs) + if err != nil { + return 0, false, err + } + + // freq-norm's needed, so maintain freq-norm chunk reader + sameChunkNexts := 0 // # of times we called Next() in the same chunk + n := i.Actual.Next() + nChunk := n / uint32(chunkSize) + + for uint64(n) < atOrAfter && i.Actual.HasNext() { + n = i.Actual.Next() + + nChunkPrev := nChunk + nChunk = n / uint32(chunkSize) + + if nChunk != nChunkPrev { + sameChunkNexts = 0 + } else { + sameChunkNexts += 1 + } + } + + if uint64(n) < atOrAfter { + // couldn't find anything + return 0, false, nil + } + + for j := 0; j < sameChunkNexts; j++ { + err := i.currChunkNext(nChunk) + if err != nil { + return 0, false, fmt.Errorf("error optimized currChunkNext: %v", err) + } + } + + if i.currChunk != nChunk || i.freqNormReader.isNil() { + err := i.loadChunk(int(nChunk)) + if err != nil { + return 0, false, fmt.Errorf("error loading chunk: %v", err) + } + } + + return uint64(n), true, nil +} + +func (i *PostingsIterator) currChunkNext(nChunk uint32) error { + if i.currChunk != nChunk || i.freqNormReader.isNil() { + err := i.loadChunk(int(nChunk)) + if err != nil { + return fmt.Errorf("error loading chunk: %v", err) + } + } + + // read off freq/offsets even though we don't care about them + hasLocs, err := i.skipFreqNormReadHasLocs() + if err != nil { + return err + } + + if i.includeLocs && hasLocs { + numLocsBytes, err := i.locReader.readUvarint() + if err != nil { + return fmt.Errorf("error reading location numLocsBytes: %v", err) + } + + // skip over all the location bytes + i.locReader.SkipBytes(int(numLocsBytes)) + } + + return nil +} + +// DocNum1Hit returns the docNum and true if this is "1-hit" optimized +// and the docNum is available. 
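+// A minimal usage sketch (itr is illustrative, not from this file):
+//
+//	if docNum, ok := itr.DocNum1Hit(); ok {
+//		// fast path: the only live doc for this term is docNum
+//	}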
+func (p *PostingsIterator) DocNum1Hit() (uint64, bool) { + if p.normBits1Hit != 0 && p.docNum1Hit != DocNum1HitFinished { + return p.docNum1Hit, true + } + return 0, false +} + +// ActualBitmap returns the underlying actual bitmap +// which can be used up the stack for optimizations +func (p *PostingsIterator) ActualBitmap() *roaring.Bitmap { + return p.ActualBM +} + +// ReplaceActual replaces the ActualBM with the provided +// bitmap +func (p *PostingsIterator) ReplaceActual(abm *roaring.Bitmap) { + p.ActualBM = abm + p.Actual = abm.Iterator() +} + +// PostingsIteratorFromBitmap constructs a PostingsIterator given an +// "actual" bitmap. +func PostingsIteratorFromBitmap(bm *roaring.Bitmap, + includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { + return &PostingsIterator{ + ActualBM: bm, + Actual: bm.Iterator(), + includeFreqNorm: includeFreqNorm, + includeLocs: includeLocs, + }, nil +} + +// PostingsIteratorFrom1Hit constructs a PostingsIterator given a +// 1-hit docNum. +func PostingsIteratorFrom1Hit(docNum1Hit uint64, + includeFreqNorm, includeLocs bool) (segment.PostingsIterator, error) { + return &PostingsIterator{ + docNum1Hit: docNum1Hit, + normBits1Hit: NormBits1Hit, + includeFreqNorm: includeFreqNorm, + includeLocs: includeLocs, + }, nil +} + +// Posting is a single entry in a postings list +type Posting struct { + docNum uint64 + freq uint64 + norm float32 + locs []segment.Location +} + +func (p *Posting) Size() int { + sizeInBytes := reflectStaticSizePosting + + for _, entry := range p.locs { + sizeInBytes += entry.Size() + } + + return sizeInBytes +} + +// Number returns the document number of this posting in this segment +func (p *Posting) Number() uint64 { + return p.docNum +} + +// Frequency returns the frequencies of occurrence of this term in this doc/field +func (p *Posting) Frequency() uint64 { + return p.freq +} + +// Norm returns the normalization factor for this posting +func (p *Posting) Norm() float64 { + return float64(p.norm) +} + +// Locations returns the location information for each occurrence +func (p *Posting) Locations() []segment.Location { + return p.locs +} + +// Location represents the location of a single occurrence +type Location struct { + field string + pos uint64 + start uint64 + end uint64 + ap []uint64 +} + +func (l *Location) Size() int { + return reflectStaticSizeLocation + + len(l.field) + + len(l.ap)*size.SizeOfUint64 +} + +// Field returns the name of the field (useful in composite fields to know +// which original field the value came from) +func (l *Location) Field() string { + return l.field +} + +// Start returns the start byte offset of this occurrence +func (l *Location) Start() uint64 { + return l.start +} + +// End returns the end byte offset of this occurrence +func (l *Location) End() uint64 { + return l.end +} + +// Pos returns the 1-based phrase position of this occurrence +func (l *Location) Pos() uint64 { + return l.pos +} + +// ArrayPositions returns the array position vector associated with this occurrence +func (l *Location) ArrayPositions() []uint64 { + return l.ap +} diff --git a/vendor/github.com/blevesearch/zap/v12/read.go b/vendor/github.com/blevesearch/zap/v12/read.go new file mode 100644 index 0000000000..e47d4c6abd --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/read.go @@ -0,0 +1,43 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zap + +import "encoding/binary" + +func (s *SegmentBase) getDocStoredMetaAndCompressed(docNum uint64) ([]byte, []byte) { + _, storedOffset, n, metaLen, dataLen := s.getDocStoredOffsets(docNum) + + meta := s.mem[storedOffset+n : storedOffset+n+metaLen] + data := s.mem[storedOffset+n+metaLen : storedOffset+n+metaLen+dataLen] + + return meta, data +} + +func (s *SegmentBase) getDocStoredOffsets(docNum uint64) ( + uint64, uint64, uint64, uint64, uint64) { + indexOffset := s.storedIndexOffset + (8 * docNum) + + storedOffset := binary.BigEndian.Uint64(s.mem[indexOffset : indexOffset+8]) + + var n uint64 + + metaLen, read := binary.Uvarint(s.mem[storedOffset : storedOffset+binary.MaxVarintLen64]) + n += uint64(read) + + dataLen, read := binary.Uvarint(s.mem[storedOffset+n : storedOffset+n+binary.MaxVarintLen64]) + n += uint64(read) + + return indexOffset, storedOffset, n, metaLen, dataLen +} diff --git a/vendor/github.com/blevesearch/zap/v12/segment.go b/vendor/github.com/blevesearch/zap/v12/segment.go new file mode 100644 index 0000000000..e8b1f067fe --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/segment.go @@ -0,0 +1,572 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zap + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "os" + "sync" + "unsafe" + + "github.com/RoaringBitmap/roaring" + "github.com/blevesearch/bleve/index/scorch/segment" + "github.com/blevesearch/bleve/size" + "github.com/couchbase/vellum" + mmap "github.com/blevesearch/mmap-go" + "github.com/golang/snappy" +) + +var reflectStaticSizeSegmentBase int + +func init() { + var sb SegmentBase + reflectStaticSizeSegmentBase = int(unsafe.Sizeof(sb)) +} + +// Open returns a zap impl of a segment +func (*ZapPlugin) Open(path string) (segment.Segment, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + mm, err := mmap.Map(f, mmap.RDONLY, 0) + if err != nil { + // mmap failed, try to close the file + _ = f.Close() + return nil, err + } + + rv := &Segment{ + SegmentBase: SegmentBase{ + mem: mm[0 : len(mm)-FooterSize], + fieldsMap: make(map[string]uint16), + fieldDvReaders: make(map[uint16]*docValueReader), + fieldFSTs: make(map[uint16]*vellum.FST), + }, + f: f, + mm: mm, + path: path, + refs: 1, + } + rv.SegmentBase.updateSize() + + err = rv.loadConfig() + if err != nil { + _ = rv.Close() + return nil, err + } + + err = rv.loadFields() + if err != nil { + _ = rv.Close() + return nil, err + } + + err = rv.loadDvReaders() + if err != nil { + _ = rv.Close() + return nil, err + } + + return rv, nil +} + +// SegmentBase is a memory only, read-only implementation of the +// segment.Segment interface, using zap's data representation. +type SegmentBase struct { + mem []byte + memCRC uint32 + chunkMode uint32 + fieldsMap map[string]uint16 // fieldName -> fieldID+1 + fieldsInv []string // fieldID -> fieldName + numDocs uint64 + storedIndexOffset uint64 + fieldsIndexOffset uint64 + docValueOffset uint64 + dictLocs []uint64 + fieldDvReaders map[uint16]*docValueReader // naive chunk cache per field + fieldDvNames []string // field names cached in fieldDvReaders + size uint64 + + m sync.Mutex + fieldFSTs map[uint16]*vellum.FST +} + +func (sb *SegmentBase) Size() int { + return int(sb.size) +} + +func (sb *SegmentBase) updateSize() { + sizeInBytes := reflectStaticSizeSegmentBase + + cap(sb.mem) + + // fieldsMap + for k := range sb.fieldsMap { + sizeInBytes += (len(k) + size.SizeOfString) + size.SizeOfUint16 + } + + // fieldsInv, dictLocs + for _, entry := range sb.fieldsInv { + sizeInBytes += len(entry) + size.SizeOfString + } + sizeInBytes += len(sb.dictLocs) * size.SizeOfUint64 + + // fieldDvReaders + for _, v := range sb.fieldDvReaders { + sizeInBytes += size.SizeOfUint16 + size.SizeOfPtr + if v != nil { + sizeInBytes += v.size() + } + } + + sb.size = uint64(sizeInBytes) +} + +func (sb *SegmentBase) AddRef() {} +func (sb *SegmentBase) DecRef() (err error) { return nil } +func (sb *SegmentBase) Close() (err error) { return nil } + +// Segment implements a persisted segment.Segment interface, by +// embedding an mmap()'ed SegmentBase. +type Segment struct { + SegmentBase + + f *os.File + mm mmap.MMap + path string + version uint32 + crc uint32 + + m sync.Mutex // Protects the fields that follow. 
+ refs int64 +} + +func (s *Segment) Size() int { + // 8 /* size of file pointer */ + // 4 /* size of version -> uint32 */ + // 4 /* size of crc -> uint32 */ + sizeOfUints := 16 + + sizeInBytes := (len(s.path) + size.SizeOfString) + sizeOfUints + + // mutex, refs -> int64 + sizeInBytes += 16 + + // do not include the mmap'ed part + return sizeInBytes + s.SegmentBase.Size() - cap(s.mem) +} + +func (s *Segment) AddRef() { + s.m.Lock() + s.refs++ + s.m.Unlock() +} + +func (s *Segment) DecRef() (err error) { + s.m.Lock() + s.refs-- + if s.refs == 0 { + err = s.closeActual() + } + s.m.Unlock() + return err +} + +func (s *Segment) loadConfig() error { + crcOffset := len(s.mm) - 4 + s.crc = binary.BigEndian.Uint32(s.mm[crcOffset : crcOffset+4]) + + verOffset := crcOffset - 4 + s.version = binary.BigEndian.Uint32(s.mm[verOffset : verOffset+4]) + if s.version != Version { + return fmt.Errorf("unsupported version %d", s.version) + } + + chunkOffset := verOffset - 4 + s.chunkMode = binary.BigEndian.Uint32(s.mm[chunkOffset : chunkOffset+4]) + + docValueOffset := chunkOffset - 8 + s.docValueOffset = binary.BigEndian.Uint64(s.mm[docValueOffset : docValueOffset+8]) + + fieldsIndexOffset := docValueOffset - 8 + s.fieldsIndexOffset = binary.BigEndian.Uint64(s.mm[fieldsIndexOffset : fieldsIndexOffset+8]) + + storedIndexOffset := fieldsIndexOffset - 8 + s.storedIndexOffset = binary.BigEndian.Uint64(s.mm[storedIndexOffset : storedIndexOffset+8]) + + numDocsOffset := storedIndexOffset - 8 + s.numDocs = binary.BigEndian.Uint64(s.mm[numDocsOffset : numDocsOffset+8]) + return nil +} + +func (s *SegmentBase) loadFields() error { + // NOTE for now we assume the fields index immediately precedes + // the footer, and if this changes, need to adjust accordingly (or + // store explicit length), where s.mem was sliced from s.mm in Open(). 
+ fieldsIndexEnd := uint64(len(s.mem)) + + // iterate through fields index + var fieldID uint64 + for s.fieldsIndexOffset+(8*fieldID) < fieldsIndexEnd { + addr := binary.BigEndian.Uint64(s.mem[s.fieldsIndexOffset+(8*fieldID) : s.fieldsIndexOffset+(8*fieldID)+8]) + + dictLoc, read := binary.Uvarint(s.mem[addr:fieldsIndexEnd]) + n := uint64(read) + s.dictLocs = append(s.dictLocs, dictLoc) + + var nameLen uint64 + nameLen, read = binary.Uvarint(s.mem[addr+n : fieldsIndexEnd]) + n += uint64(read) + + name := string(s.mem[addr+n : addr+n+nameLen]) + s.fieldsInv = append(s.fieldsInv, name) + s.fieldsMap[name] = uint16(fieldID + 1) + + fieldID++ + } + return nil +} + +// Dictionary returns the term dictionary for the specified field +func (s *SegmentBase) Dictionary(field string) (segment.TermDictionary, error) { + dict, err := s.dictionary(field) + if err == nil && dict == nil { + return &segment.EmptyDictionary{}, nil + } + return dict, err +} + +func (sb *SegmentBase) dictionary(field string) (rv *Dictionary, err error) { + fieldIDPlus1 := sb.fieldsMap[field] + if fieldIDPlus1 > 0 { + rv = &Dictionary{ + sb: sb, + field: field, + fieldID: fieldIDPlus1 - 1, + } + + dictStart := sb.dictLocs[rv.fieldID] + if dictStart > 0 { + var ok bool + sb.m.Lock() + if rv.fst, ok = sb.fieldFSTs[rv.fieldID]; !ok { + // read the length of the vellum data + vellumLen, read := binary.Uvarint(sb.mem[dictStart : dictStart+binary.MaxVarintLen64]) + fstBytes := sb.mem[dictStart+uint64(read) : dictStart+uint64(read)+vellumLen] + rv.fst, err = vellum.Load(fstBytes) + if err != nil { + sb.m.Unlock() + return nil, fmt.Errorf("dictionary field %s vellum err: %v", field, err) + } + + sb.fieldFSTs[rv.fieldID] = rv.fst + } + + sb.m.Unlock() + rv.fstReader, err = rv.fst.Reader() + if err != nil { + return nil, fmt.Errorf("dictionary field %s vellum reader err: %v", field, err) + } + + } + } + + return rv, nil +} + +// visitDocumentCtx holds data structures that are reusable across +// multiple VisitDocument() calls to avoid memory allocations +type visitDocumentCtx struct { + buf []byte + reader bytes.Reader + arrayPos []uint64 +} + +var visitDocumentCtxPool = sync.Pool{ + New: func() interface{} { + reuse := &visitDocumentCtx{} + return reuse + }, +} + +// VisitDocument invokes the DocFieldValueVistor for each stored field +// for the specified doc number +func (s *SegmentBase) VisitDocument(num uint64, visitor segment.DocumentFieldValueVisitor) error { + vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) + defer visitDocumentCtxPool.Put(vdc) + return s.visitDocument(vdc, num, visitor) +} + +func (s *SegmentBase) visitDocument(vdc *visitDocumentCtx, num uint64, + visitor segment.DocumentFieldValueVisitor) error { + // first make sure this is a valid number in this segment + if num < s.numDocs { + meta, compressed := s.getDocStoredMetaAndCompressed(num) + + vdc.reader.Reset(meta) + + // handle _id field special case + idFieldValLen, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + idFieldVal := compressed[:idFieldValLen] + + keepGoing := visitor("_id", byte('t'), idFieldVal, nil) + if !keepGoing { + visitDocumentCtxPool.Put(vdc) + return nil + } + + // handle non-"_id" fields + compressed = compressed[idFieldValLen:] + + uncompressed, err := snappy.Decode(vdc.buf[:cap(vdc.buf)], compressed) + if err != nil { + return err + } + + for keepGoing { + field, err := binary.ReadUvarint(&vdc.reader) + if err == io.EOF { + break + } + if err != nil { + return err + } + typ, err := 
binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + offset, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + l, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + numap, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + var arrayPos []uint64 + if numap > 0 { + if cap(vdc.arrayPos) < int(numap) { + vdc.arrayPos = make([]uint64, numap) + } + arrayPos = vdc.arrayPos[:numap] + for i := 0; i < int(numap); i++ { + ap, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return err + } + arrayPos[i] = ap + } + } + + value := uncompressed[offset : offset+l] + keepGoing = visitor(s.fieldsInv[field], byte(typ), value, arrayPos) + } + + vdc.buf = uncompressed + } + return nil +} + +// DocID returns the value of the _id field for the given docNum +func (s *SegmentBase) DocID(num uint64) ([]byte, error) { + if num >= s.numDocs { + return nil, nil + } + + vdc := visitDocumentCtxPool.Get().(*visitDocumentCtx) + + meta, compressed := s.getDocStoredMetaAndCompressed(num) + + vdc.reader.Reset(meta) + + // handle _id field special case + idFieldValLen, err := binary.ReadUvarint(&vdc.reader) + if err != nil { + return nil, err + } + idFieldVal := compressed[:idFieldValLen] + + visitDocumentCtxPool.Put(vdc) + + return idFieldVal, nil +} + +// Count returns the number of documents in this segment. +func (s *SegmentBase) Count() uint64 { + return s.numDocs +} + +// DocNumbers returns a bitset corresponding to the doc numbers of all the +// provided _id strings +func (s *SegmentBase) DocNumbers(ids []string) (*roaring.Bitmap, error) { + rv := roaring.New() + + if len(s.fieldsMap) > 0 { + idDict, err := s.dictionary("_id") + if err != nil { + return nil, err + } + + postingsList := emptyPostingsList + + sMax, err := idDict.fst.GetMaxKey() + if err != nil { + return nil, err + } + sMaxStr := string(sMax) + filteredIds := make([]string, 0, len(ids)) + for _, id := range ids { + if id <= sMaxStr { + filteredIds = append(filteredIds, id) + } + } + + for _, id := range filteredIds { + postingsList, err = idDict.postingsList([]byte(id), nil, postingsList) + if err != nil { + return nil, err + } + postingsList.OrInto(rv) + } + } + + return rv, nil +} + +// Fields returns the field names used in this segment +func (s *SegmentBase) Fields() []string { + return s.fieldsInv +} + +// Path returns the path of this segment on disk +func (s *Segment) Path() string { + return s.path +} + +// Close releases all resources associated with this segment +func (s *Segment) Close() (err error) { + return s.DecRef() +} + +func (s *Segment) closeActual() (err error) { + if s.mm != nil { + err = s.mm.Unmap() + } + // try to close file even if unmap failed + if s.f != nil { + err2 := s.f.Close() + if err == nil { + // try to return first error + err = err2 + } + } + return +} + +// some helpers i started adding for the command-line utility + +// Data returns the underlying mmaped data slice +func (s *Segment) Data() []byte { + return s.mm +} + +// CRC returns the CRC value stored in the file footer +func (s *Segment) CRC() uint32 { + return s.crc +} + +// Version returns the file version in the file footer +func (s *Segment) Version() uint32 { + return s.version +} + +// ChunkFactor returns the chunk factor in the file footer +func (s *Segment) ChunkMode() uint32 { + return s.chunkMode +} + +// FieldsIndexOffset returns the fields index offset in the file footer +func (s *Segment) FieldsIndexOffset() uint64 { + return s.fieldsIndexOffset +} 
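
To make the stored-field decode above concrete, here is a minimal, self-contained sketch of the record layout it walks: the meta stream opens with a uvarint giving the length of the `_id` value, the `_id` bytes sit uncompressed at the front of the data section, and everything after them is a single Snappy block holding the remaining field values. The `decodeStoredDoc` helper is hypothetical and not part of this patch.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/golang/snappy"
)

// decodeStoredDoc is a hypothetical helper mirroring what
// getDocStoredMetaAndCompressed/visitDocument read above: meta is a
// stream of uvarints, data is the raw _id bytes followed by one
// snappy-compressed block.
func decodeStoredDoc(meta, data []byte) (id, fields []byte, err error) {
	r := bytes.NewReader(meta)

	// First uvarint in the meta stream: length of the uncompressed
	// _id value stored at the front of the data section.
	idLen, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, nil, err
	}
	id = data[:idLen]

	// Everything after the _id is snappy-compressed field values.
	fields, err = snappy.Decode(nil, data[idLen:])
	if err != nil {
		return nil, nil, err
	}
	return id, fields, nil
}

func main() {
	// Toy round trip: meta = uvarint(len("doc-1")), data = "doc-1" + snappy body.
	body := snappy.Encode(nil, []byte("stored field values"))
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(len("doc-1")))

	id, fields, err := decodeStoredDoc(buf[:n], append([]byte("doc-1"), body...))
	fmt.Println(string(id), string(fields), err) // doc-1 stored field values <nil>
}
```
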
+ +// StoredIndexOffset returns the stored value index offset in the file footer +func (s *Segment) StoredIndexOffset() uint64 { + return s.storedIndexOffset +} + +// DocValueOffset returns the docValue offset in the file footer +func (s *Segment) DocValueOffset() uint64 { + return s.docValueOffset +} + +// NumDocs returns the number of documents in the file footer +func (s *Segment) NumDocs() uint64 { + return s.numDocs +} + +// DictAddr is a helper function to compute the file offset where the +// dictionary is stored for the specified field. +func (s *Segment) DictAddr(field string) (uint64, error) { + fieldIDPlus1, ok := s.fieldsMap[field] + if !ok { + return 0, fmt.Errorf("no such field '%s'", field) + } + + return s.dictLocs[fieldIDPlus1-1], nil +} + +func (s *SegmentBase) loadDvReaders() error { + if s.docValueOffset == fieldNotUninverted || s.numDocs == 0 { + return nil + } + + var read uint64 + for fieldID, field := range s.fieldsInv { + var fieldLocStart, fieldLocEnd uint64 + var n int + fieldLocStart, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) + if n <= 0 { + return fmt.Errorf("loadDvReaders: failed to read the docvalue offset start for field %d", fieldID) + } + read += uint64(n) + fieldLocEnd, n = binary.Uvarint(s.mem[s.docValueOffset+read : s.docValueOffset+read+binary.MaxVarintLen64]) + if n <= 0 { + return fmt.Errorf("loadDvReaders: failed to read the docvalue offset end for field %d", fieldID) + } + read += uint64(n) + + fieldDvReader, err := s.loadFieldDocValueReader(field, fieldLocStart, fieldLocEnd) + if err != nil { + return err + } + if fieldDvReader != nil { + s.fieldDvReaders[uint16(fieldID)] = fieldDvReader + s.fieldDvNames = append(s.fieldDvNames, field) + } + } + + return nil +} diff --git a/vendor/github.com/blevesearch/zap/v12/write.go b/vendor/github.com/blevesearch/zap/v12/write.go new file mode 100644 index 0000000000..77aefdbfc8 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/write.go @@ -0,0 +1,145 @@ +// Copyright (c) 2017 Couchbase, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
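
For orientation before the code: `persistFooter` below writes the fixed-size footer fields in the order numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset, chunkMode, version, CRC, and `Segment.loadConfig` in segment.go reads them back by walking backwards from the end of the file. Here is a small sketch of that backwards parse; `parseFooter` and its sample values are hypothetical, for illustration only.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// footerSize mirrors the FooterSize constant below:
// crc + ver + chunk + docValue + fields + stored + numDocs.
const footerSize = 4 + 4 + 4 + 8 + 8 + 8 + 8

// parseFooter is a hypothetical reader for the footer that persistFooter
// writes, decoding fields back-to-front the way Segment.loadConfig does.
func parseFooter(file []byte) (numDocs, storedIdx, fieldsIdx, docValue uint64,
	chunkMode, version, crc uint32) {
	end := len(file)
	crc = binary.BigEndian.Uint32(file[end-4:])
	version = binary.BigEndian.Uint32(file[end-8 : end-4])
	chunkMode = binary.BigEndian.Uint32(file[end-12 : end-8])
	docValue = binary.BigEndian.Uint64(file[end-20 : end-12])
	fieldsIdx = binary.BigEndian.Uint64(file[end-28 : end-20])
	storedIdx = binary.BigEndian.Uint64(file[end-36 : end-28])
	numDocs = binary.BigEndian.Uint64(file[end-44 : end-36])
	return
}

func main() {
	// Build a fake footer in write order (numDocs first, CRC last),
	// then parse it back; all values here are made up.
	buf := make([]byte, footerSize)
	binary.BigEndian.PutUint64(buf[0:], 7)   // numDocs
	binary.BigEndian.PutUint64(buf[8:], 11)  // storedIndexOffset
	binary.BigEndian.PutUint64(buf[16:], 13) // fieldsIndexOffset
	binary.BigEndian.PutUint64(buf[24:], 17) // docValueOffset
	binary.BigEndian.PutUint32(buf[32:], 2)  // chunkMode
	binary.BigEndian.PutUint32(buf[36:], 12) // version
	binary.BigEndian.PutUint32(buf[40:], 0)  // crc placeholder
	fmt.Println(parseFooter(buf)) // 7 11 13 17 2 12 0
}
```
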
+ +package zap + +import ( + "encoding/binary" + "io" + + "github.com/RoaringBitmap/roaring" +) + +// writes out the length of the roaring bitmap in bytes as varint +// then writes out the roaring bitmap itself +func writeRoaringWithLen(r *roaring.Bitmap, w io.Writer, + reuseBufVarint []byte) (int, error) { + buf, err := r.ToBytes() + if err != nil { + return 0, err + } + + var tw int + + // write out the length + n := binary.PutUvarint(reuseBufVarint, uint64(len(buf))) + nw, err := w.Write(reuseBufVarint[:n]) + tw += nw + if err != nil { + return tw, err + } + + // write out the roaring bytes + nw, err = w.Write(buf) + tw += nw + if err != nil { + return tw, err + } + + return tw, nil +} + +func persistFields(fieldsInv []string, w *CountHashWriter, dictLocs []uint64) (uint64, error) { + var rv uint64 + var fieldsOffsets []uint64 + + for fieldID, fieldName := range fieldsInv { + // record start of this field + fieldsOffsets = append(fieldsOffsets, uint64(w.Count())) + + // write out the dict location and field name length + _, err := writeUvarints(w, dictLocs[fieldID], uint64(len(fieldName))) + if err != nil { + return 0, err + } + + // write out the field name + _, err = w.Write([]byte(fieldName)) + if err != nil { + return 0, err + } + } + + // now write out the fields index + rv = uint64(w.Count()) + for fieldID := range fieldsInv { + err := binary.Write(w, binary.BigEndian, fieldsOffsets[fieldID]) + if err != nil { + return 0, err + } + } + + return rv, nil +} + +// FooterSize is the size of the footer record in bytes +// crc + ver + chunk + field offset + stored offset + num docs + docValueOffset +const FooterSize = 4 + 4 + 4 + 8 + 8 + 8 + 8 + +func persistFooter(numDocs, storedIndexOffset, fieldsIndexOffset, docValueOffset uint64, + chunkMode uint32, crcBeforeFooter uint32, writerIn io.Writer) error { + w := NewCountHashWriter(writerIn) + w.crc = crcBeforeFooter + + // write out the number of docs + err := binary.Write(w, binary.BigEndian, numDocs) + if err != nil { + return err + } + // write out the stored field index location: + err = binary.Write(w, binary.BigEndian, storedIndexOffset) + if err != nil { + return err + } + // write out the field index location + err = binary.Write(w, binary.BigEndian, fieldsIndexOffset) + if err != nil { + return err + } + // write out the fieldDocValue location + err = binary.Write(w, binary.BigEndian, docValueOffset) + if err != nil { + return err + } + // write out 32-bit chunk factor + err = binary.Write(w, binary.BigEndian, chunkMode) + if err != nil { + return err + } + // write out 32-bit version + err = binary.Write(w, binary.BigEndian, Version) + if err != nil { + return err + } + // write out CRC-32 of everything upto but not including this CRC + err = binary.Write(w, binary.BigEndian, w.crc) + if err != nil { + return err + } + return nil +} + +func writeUvarints(w io.Writer, vals ...uint64) (tw int, err error) { + buf := make([]byte, binary.MaxVarintLen64) + for _, val := range vals { + n := binary.PutUvarint(buf, val) + var nw int + nw, err = w.Write(buf[:n]) + tw += nw + if err != nil { + return tw, err + } + } + return tw, err +} diff --git a/vendor/github.com/blevesearch/zap/v12/zap.md b/vendor/github.com/blevesearch/zap/v12/zap.md new file mode 100644 index 0000000000..d74dc548b8 --- /dev/null +++ b/vendor/github.com/blevesearch/zap/v12/zap.md @@ -0,0 +1,177 @@ +# ZAP File Format + +## Legend + +### Sections + + |========| + | | section + |========| + +### Fixed-size fields + + |--------| |----| |--| |-| + | | uint64 | | uint32 | 
| uint16 | | uint8 + |--------| |----| |--| |-| + +### Varints + + |~~~~~~~~| + | | varint(up to uint64) + |~~~~~~~~| + +### Arbitrary-length fields + + |--------...---| + | | arbitrary-length field (string, vellum, roaring bitmap) + |--------...---| + +### Chunked data + + [--------] + [ ] + [--------] + +## Overview + +Footer section describes the configuration of particular ZAP file. The format of footer is version-dependent, so it is necessary to check `V` field before the parsing. + + |==================================================| + | Stored Fields | + |==================================================| + |-----> | Stored Fields Index | + | |==================================================| + | | Dictionaries + Postings + DocValues | + | |==================================================| + | |---> | DocValues Index | + | | |==================================================| + | | | Fields | + | | |==================================================| + | | |-> | Fields Index | + | | | |========|========|========|========|====|====|====| + | | | | D# | SF | F | FDV | CF | V | CC | (Footer) + | | | |========|====|===|====|===|====|===|====|====|====| + | | | | | | + |-+-+-----------------| | | + | |--------------------------| | + |-------------------------------------| + + D#. Number of Docs. + SF. Stored Fields Index Offset. + F. Field Index Offset. + FDV. Field DocValue Offset. + CF. Chunk Factor. + V. Version. + CC. CRC32. + +## Stored Fields + +Stored Fields Index is `D#` consecutive 64-bit unsigned integers - offsets, where relevant Stored Fields Data records are located. + + 0 [SF] [SF + D# * 8] + | Stored Fields | Stored Fields Index | + |================================|==================================| + | | | + | |--------------------| ||--------|--------|. . .|--------|| + | |-> | Stored Fields Data | || 0 | 1 | | D# - 1 || + | | |--------------------| ||--------|----|---|. . .|--------|| + | | | | | + |===|============================|==============|===================| + | | + |-------------------------------------------| + +Stored Fields Data is an arbitrary size record, which consists of metadata and [Snappy](https://github.com/golang/snappy)-compressed data. + + Stored Fields Data + |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| + | MDS | CDS | MD | CD | + |~~~~~~~~|~~~~~~~~|~~~~~~~~...~~~~~~~~|~~~~~~~~...~~~~~~~~| + + MDS. Metadata size. + CDS. Compressed data size. + MD. Metadata. + CD. Snappy-compressed data. + +## Fields + +Fields Index section located between addresses `F` and `len(file) - len(footer)` and consist of `uint64` values (`F1`, `F2`, ...) which are offsets to records in Fields section. We have `F# = (len(file) - len(footer) - F) / sizeof(uint64)` fields. + + + (...) [F] [F + F#] + | Fields | Fields Index. | + |================================|================================| + | | | + | |~~~~~~~~|~~~~~~~~|---...---|||--------|--------|...|--------|| + ||->| Dict | Length | Name ||| 0 | 1 | | F# - 1 || + || |~~~~~~~~|~~~~~~~~|---...---|||--------|----|---|...|--------|| + || | | | + ||===============================|==============|=================| + | | + |----------------------------------------------| + + +## Dictionaries + Postings + +Each of fields has its own dictionary, encoded in [Vellum](https://github.com/couchbase/vellum) format. Dictionary consists of pairs `(term, offset)`, where `offset` indicates the position of postings (list of documents) for this particular term. 
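
As a concrete, hypothetical illustration of the paragraph above, the sketch below builds a tiny two-term FST in memory with vellum and looks up a term to get its postings offset; in a real segment the FST bytes are sliced out of the mmap'ed file after the leading `Length` varint, as the dictionary layout diagram below shows.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/couchbase/vellum"
)

func main() {
	// Build a tiny term dictionary in memory; the uint64 value attached
	// to each term stands in for its postings offset (made-up numbers).
	var buf bytes.Buffer
	b, err := vellum.New(&buf, nil)
	if err != nil {
		log.Fatal(err)
	}
	// vellum requires keys to be inserted in sorted order.
	if err := b.Insert([]byte("bleve"), 100); err != nil {
		log.Fatal(err)
	}
	if err := b.Insert([]byte("search"), 250); err != nil {
		log.Fatal(err)
	}
	if err := b.Close(); err != nil {
		log.Fatal(err)
	}

	// Load the FST and resolve a term to the offset of its postings list.
	fst, err := vellum.Load(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	offset, exists, err := fst.Get([]byte("search"))
	fmt.Println(offset, exists, err) // 250 true <nil>
}
```
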
+ + |================================================================|- Dictionaries + + | | Postings + + | | DocValues + | Freq/Norm (chunked) | + | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | + | |->[ Freq | Norm (float32 under varint) ] | + | | [~~~~~~|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~] | + | | | + | |------------------------------------------------------------| | + | Location Details (chunked) | | + | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | + | |->[ Size | Pos | Start | End | Arr# | ArrPos | ... ] | | + | | [~~~~~~|~~~~~|~~~~~~~|~~~~~|~~~~~~|~~~~~~~~|~~~~~] | | + | | | | + | |----------------------| | | + | Postings List | | | + | |~~~~~~~~|~~~~~|~~|~~~~~~~~|-----------...--| | | + | |->| F/N | LD | Length | ROARING BITMAP | | | + | | |~~~~~|~~|~~~~~~~~|~~~~~~~~|-----------...--| | | + | | |----------------------------------------------| | + | |--------------------------------------| | + | Dictionary | | + | |~~~~~~~~|--------------------------|-...-| | + | |->| Length | VELLUM DATA : (TERM -> OFFSET) | | + | | |~~~~~~~~|----------------------------...-| | + | | | + |======|=========================================================|- DocValues Index + | | | + |======|=========================================================|- Fields + | | | + | |~~~~|~~~|~~~~~~~~|---...---| | + | | Dict | Length | Name | | + | |~~~~~~~~|~~~~~~~~|---...---| | + | | + |================================================================| + +## DocValues + +DocValues Index is `F#` pairs of varints, one pair per field. Each pair of varints indicates start and end point of DocValues slice. + + |================================================================| + | |------...--| | + | |->| DocValues |<-| | + | | |------...--| | | + |==|=================|===========================================|- DocValues Index + ||~|~~~~~~~~~|~~~~~~~|~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| + || DV1 START | DV1 STOP | . . . . . | DV(F#) START | DV(F#) END || + ||~~~~~~~~~~~|~~~~~~~~~~| |~~~~~~~~~~~~~~|~~~~~~~~~~~~|| + |================================================================| + +DocValues is chunked Snappy-compressed values for each document and field. + + [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] + [ Doc# in Chunk | Doc1 | Offset1 | ... | DocN | OffsetN | SNAPPY COMPRESSED DATA ] + [~~~~~~~~~~~~~~~|~~~~~~|~~~~~~~~~|-...-|~~~~~~|~~~~~~~~~|--------------------...-] + +Last 16 bytes are description of chunks. 
+ + |~~~~~~~~~~~~...~|----------------|----------------| + | Chunk Sizes | Chunk Size Arr | Chunk# | + |~~~~~~~~~~~~...~|----------------|----------------| diff --git a/vendor/github.com/couchbase/vellum/go.mod b/vendor/github.com/couchbase/vellum/go.mod index 0e304159d4..ef9d9abfb0 100644 --- a/vendor/github.com/couchbase/vellum/go.mod +++ b/vendor/github.com/couchbase/vellum/go.mod @@ -3,7 +3,7 @@ module github.com/couchbase/vellum go 1.12 require ( - github.com/edsrzf/mmap-go v1.0.0 + github.com/blevesearch/mmap-go v1.0.2 github.com/spf13/cobra v0.0.5 github.com/willf/bitset v1.1.10 golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // indirect diff --git a/vendor/github.com/couchbase/vellum/go.sum b/vendor/github.com/couchbase/vellum/go.sum index f14998530d..1022e684eb 100644 --- a/vendor/github.com/couchbase/vellum/go.sum +++ b/vendor/github.com/couchbase/vellum/go.sum @@ -1,12 +1,12 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0= +github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -32,6 +32,7 @@ github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPy github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/couchbase/vellum/vellum_mmap.go b/vendor/github.com/couchbase/vellum/vellum_mmap.go index 5acd2f4707..81ea165091 100644 --- a/vendor/github.com/couchbase/vellum/vellum_mmap.go +++ b/vendor/github.com/couchbase/vellum/vellum_mmap.go @@ -19,7 +19,7 @@ package vellum import ( "os" - mmap "github.com/edsrzf/mmap-go" + mmap "github.com/blevesearch/mmap-go" ) type mmapWrapper struct { diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm.go 
b/vendor/github.com/etcd-io/bbolt/bolt_arm.go deleted file mode 100644 index 105d27ddb7..0000000000 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bbolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. - - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go index 62df7e3b8b..e810e6fea1 100644 --- a/vendor/github.com/golang/protobuf/proto/buffer.go +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -33,8 +33,8 @@ func SizeVarint(v uint64) int { return protowire.SizeVarint(v) } -// DecodeVarint parses a varint encoded integer from b, returning the -// integer value and the length of the varint. +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. // It returns (0, 0) if there is a parse error. func DecodeVarint(b []byte) (uint64, int) { v, n := protowire.ConsumeVarint(b) @@ -112,9 +112,9 @@ func (b *Buffer) Marshal(m Message) error { return err } -// Unmarshal parses the wire-format message in the buffer and places the decoded results in m. -// -// Unlike proto.Unmarshal, this does not reset the message before starting to unmarshal. +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. func (b *Buffer) Unmarshal(m Message) error { err := UnmarshalMerge(b.Unread(), m) b.idx = len(b.buf) @@ -260,7 +260,7 @@ func (b *Buffer) DecodeStringBytes() (string, error) { } // DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m. +// It does not reset m before unmarshaling. func (b *Buffer) DecodeMessage(m Message) error { v, err := b.DecodeRawBytes(false) if err != nil { @@ -272,7 +272,7 @@ func (b *Buffer) DecodeMessage(m Message) error { // DecodeGroup consumes a message group from the buffer. // It assumes that the start group marker has already been consumed and // consumes all bytes until (and including the end group marker). -// It does not reset m. +// It does not reset m before unmarshaling. 
func (b *Buffer) DecodeGroup(m Message) error { v, n, err := consumeGroup(b.buf[b.idx:]) if err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 5ed131c57d..42fc120c97 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -68,7 +68,7 @@ func HasExtension(m Message, xt *ExtensionDesc) (has bool) { return has } -// ClearExtension removes the the exntesion field from m +// ClearExtension removes the extension field from m // either as an explicitly populated field or as an unknown field. func ClearExtension(m Message, xt *ExtensionDesc) { mr := MessageReflect(m) @@ -108,7 +108,7 @@ func ClearAllExtensions(m Message) { clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index abab110a06..1e7ff64205 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -29,7 +29,7 @@ var fileCache sync.Map // map[filePath]fileDescGZIP // RegisterFile is called from generated code to register the compressed // FileDescriptorProto with the file path for a proto source file. // -// Deprecated: Use protoregistry.GlobalFiles.Register instead. +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. func RegisterFile(s filePath, d fileDescGZIP) { // Decompress the descriptor. zr, err := gzip.NewReader(bytes.NewReader(d)) @@ -53,7 +53,7 @@ func RegisterFile(s filePath, d fileDescGZIP) { // FileDescriptor returns the compressed FileDescriptorProto given the file path // for a proto source file. It returns nil if not found. // -// Deprecated: Use protoregistry.GlobalFiles.RangeFilesByPath instead. +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. func FileDescriptor(s filePath) fileDescGZIP { if v, ok := fileCache.Load(s); ok { return v.(fileDescGZIP) @@ -98,7 +98,7 @@ var numFilesCache sync.Map // map[protoreflect.FullName]int // RegisterEnum is called from the generated code to register the mapping of // enum value names to enum numbers for the enum identified by s. // -// Deprecated: Use protoregistry.GlobalTypes.Register instead. +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { if _, ok := enumCache.Load(s); ok { panic("proto: duplicate enum registered: " + s) @@ -181,7 +181,7 @@ var messageTypeCache sync.Map // map[messageName]reflect.Type // RegisterType is called from generated code to register the message Go type // for a message of the given name. // -// Deprecated: Use protoregistry.GlobalTypes.Register instead. +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. func RegisterType(m Message, s messageName) { mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { @@ -280,7 +280,7 @@ func MessageName(m Message) messageName { // RegisterExtension is called from the generated code to register // the extension descriptor. 
// -// Deprecated: Use protoregistry.GlobalTypes.Register instead. +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. func RegisterExtension(d *ExtensionDesc) { if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { panic(err) diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go index 7ac02e68f6..a31134eeb3 100644 --- a/vendor/github.com/golang/protobuf/proto/text_encode.go +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -94,16 +94,16 @@ var ( ) // MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } // MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } // CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } // CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } var ( newline = []byte("\n") diff --git a/vendor/github.com/mschoch/smat/go.mod b/vendor/github.com/mschoch/smat/go.mod new file mode 100644 index 0000000000..70c9eed57e --- /dev/null +++ b/vendor/github.com/mschoch/smat/go.mod @@ -0,0 +1,3 @@ +module github.com/mschoch/smat + +go 1.13 diff --git a/vendor/github.com/steveyen/gtreap/go.mod b/vendor/github.com/steveyen/gtreap/go.mod new file mode 100644 index 0000000000..328430b210 --- /dev/null +++ b/vendor/github.com/steveyen/gtreap/go.mod @@ -0,0 +1,3 @@ +module github.com/steveyen/gtreap + +go 1.13 diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go index 4325860ada..77601e52c3 100644 --- a/vendor/github.com/tinylib/msgp/msgp/json.go +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -466,7 +466,23 @@ func rwquoted(dst jsWriter, s []byte) (n int, err error) { return } n++ + case '\t': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('t') + if err != nil { + return + } + n++ default: + // This encodes bytes < 0x20 except for \t, \n and \r. + // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. nn, err = dst.WriteString(`\u00`) n += nn if err != nil { @@ -495,16 +511,23 @@ func rwquoted(dst jsWriter, s []byte) (n int, err error) { if err != nil { return } - nn, err = dst.WriteString(`\ufffd`) - n += nn - if err != nil { - return - } - i += size - start = i - continue } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. 
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. if c == '\u2028' || c == '\u2029' { if start < i { nn, err = dst.Write(s[start:i]) @@ -512,17 +535,20 @@ func rwquoted(dst jsWriter, s []byte) (n int, err error) { if err != nil { return } - nn, err = dst.WriteString(`\u202`) - n += nn - if err != nil { - return - } - err = dst.WriteByte(hex[c&0xF]) - if err != nil { - return - } - n++ } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + i += size + start = i + continue } i += size } diff --git a/vendor/github.com/etcd-io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore similarity index 100% rename from vendor/github.com/etcd-io/bbolt/.gitignore rename to vendor/go.etcd.io/bbolt/.gitignore diff --git a/vendor/github.com/etcd-io/bbolt/.travis.yml b/vendor/go.etcd.io/bbolt/.travis.yml similarity index 96% rename from vendor/github.com/etcd-io/bbolt/.travis.yml rename to vendor/go.etcd.io/bbolt/.travis.yml index a60300c558..257dfdfee4 100644 --- a/vendor/github.com/etcd-io/bbolt/.travis.yml +++ b/vendor/go.etcd.io/bbolt/.travis.yml @@ -4,7 +4,7 @@ go_import_path: go.etcd.io/bbolt sudo: false go: -- 1.11 +- 1.12 before_install: - go get -v honnef.co/go/tools/... diff --git a/vendor/github.com/etcd-io/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE similarity index 100% rename from vendor/github.com/etcd-io/bbolt/LICENSE rename to vendor/go.etcd.io/bbolt/LICENSE diff --git a/vendor/github.com/etcd-io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile similarity index 100% rename from vendor/github.com/etcd-io/bbolt/Makefile rename to vendor/go.etcd.io/bbolt/Makefile diff --git a/vendor/github.com/etcd-io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md similarity index 98% rename from vendor/github.com/etcd-io/bbolt/README.md rename to vendor/go.etcd.io/bbolt/README.md index e9989efc50..2dff3761da 100644 --- a/vendor/github.com/etcd-io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -275,7 +275,7 @@ should be writable. ### Using buckets Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +bucket must be unique. You can create a bucket using the `Tx.CreateBucket()` function: ```go @@ -923,6 +923,7 @@ Below is a list of public, open source projects that use Bolt: * [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. * [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains +* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more) * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. 
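
The README correction above (`DB.CreateBucket()` to `Tx.CreateBucket()`) reflects that buckets can only be created inside a read-write transaction. A minimal usage sketch of that pattern with the relocated `go.etcd.io/bbolt` import path, standard bbolt API rather than code from this patch:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open (or create) the database file.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Bucket creation happens on the Tx inside a read-write transaction,
	// which is why the method lives on Tx rather than DB.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("answers"))
		if err != nil {
			return err
		}
		return b.Put([]byte("life"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
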
@@ -935,6 +936,7 @@ Below is a list of public, open source projects that use Bolt: * [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. +* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. * [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. * [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. diff --git a/vendor/github.com/etcd-io/bbolt/bolt_386.go b/vendor/go.etcd.io/bbolt/bolt_386.go similarity index 72% rename from vendor/github.com/etcd-io/bbolt/bolt_386.go rename to vendor/go.etcd.io/bbolt/bolt_386.go index 4d35ee7cf3..aee25960ff 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_386.go +++ b/vendor/go.etcd.io/bbolt/bolt_386.go @@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go b/vendor/go.etcd.io/bbolt/bolt_amd64.go similarity index 73% rename from vendor/github.com/etcd-io/bbolt/bolt_amd64.go rename to vendor/go.etcd.io/bbolt/bolt_amd64.go index 60a52dad56..5dd8f3f2ae 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_amd64.go +++ b/vendor/go.etcd.io/bbolt/bolt_amd64.go @@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/go.etcd.io/bbolt/bolt_arm.go b/vendor/go.etcd.io/bbolt/bolt_arm.go new file mode 100644 index 0000000000..aee25960ff --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_arm.go @@ -0,0 +1,7 @@ +package bbolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_arm64.go rename to vendor/go.etcd.io/bbolt/bolt_arm64.go index f5aa2a5ee2..810dfd55c5 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/bolt_linux.go rename to vendor/go.etcd.io/bbolt/bolt_linux.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_mips64x.go rename to vendor/go.etcd.io/bbolt/bolt_mips64x.go index baeb289fd9..dd8ffe1239 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -7,6 +7,3 @@ const maxMapSize = 0x8000000000 // 512GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go similarity index 74% rename from vendor/github.com/etcd-io/bbolt/bolt_mipsx.go rename to vendor/go.etcd.io/bbolt/bolt_mipsx.go index 2d9b1a91f3..a669703a4e 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -7,6 +7,3 @@ const maxMapSize = 0x40000000 // 1GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/bolt_openbsd.go rename to vendor/go.etcd.io/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go similarity index 74% rename from vendor/github.com/etcd-io/bbolt/bolt_ppc.go rename to vendor/go.etcd.io/bbolt/bolt_ppc.go index 69804714aa..84e545ef3e 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -7,6 +7,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_ppc64.go rename to vendor/go.etcd.io/bbolt/bolt_ppc64.go index 3565908576..a76120908c 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go rename to vendor/go.etcd.io/bbolt/bolt_ppc64le.go index 422c7c69d6..c830f2fc77 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? 
-var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_riscv64.go rename to vendor/go.etcd.io/bbolt/bolt_riscv64.go index 07b4b47cdb..c967613b00 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = true diff --git a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go similarity index 75% rename from vendor/github.com/etcd-io/bbolt/bolt_s390x.go rename to vendor/go.etcd.io/bbolt/bolt_s390x.go index 6d3fcb825d..ff2a560970 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go similarity index 98% rename from vendor/github.com/etcd-io/bbolt/bolt_unix.go rename to vendor/go.etcd.io/bbolt/bolt_unix.go index 5f2bb51451..2938fed584 100644 --- a/vendor/github.com/etcd-io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,4 +1,4 @@ -// +build !windows,!plan9,!solaris +// +build !windows,!plan9,!solaris,!aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go new file mode 100644 index 0000000000..a64c16f512 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_unix_aix.go @@ -0,0 +1,90 @@ +// +build aix + +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. 
+ db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/bolt_unix_solaris.go rename to vendor/go.etcd.io/bbolt/bolt_unix_solaris.go diff --git a/vendor/github.com/etcd-io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/bolt_windows.go rename to vendor/go.etcd.io/bbolt/bolt_windows.go diff --git a/vendor/github.com/etcd-io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/boltsync_unix.go rename to vendor/go.etcd.io/bbolt/boltsync_unix.go diff --git a/vendor/github.com/etcd-io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go similarity index 95% rename from vendor/github.com/etcd-io/bbolt/bucket.go rename to vendor/go.etcd.io/bbolt/bucket.go index 84bfd4d6a2..d8750b1487 100644 --- a/vendor/github.com/etcd-io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - + // Unaligned access requires a copy to be made. + const unalignedMask = unsafe.Alignof(struct { + bucket + page + }{}) - 1 + unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { value = cloneBytes(value) } @@ -206,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { } // DeleteBucket deletes a bucket at the given key. -// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +// Returns an error if the bucket does not exist, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) error { if b.tx.db == nil { return ErrTxClosed @@ -228,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Recursively delete all child buckets. child := b.Bucket(key) err := child.ForEach(func(k, v []byte) error { - if v == nil { + if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats { if p.count != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * int(p.count-1) + used += leafPageElementSize * uintptr(p.count-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position @@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats { // of all previous elements' keys and values. // It also includes the last element's header. 
lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) } if b.root == 0 { // For inlined bucket just update the inline stats - s.InlineBucketInuse += used + s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ - s.LeafInuse += used + s.LeafInuse += int(used) s.LeafOverflowN += int(p.overflow) // Collect stats from sub-buckets. @@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats { // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used + used += uintptr(lastElement.pos + lastElement.ksize) + s.BranchInuse += int(used) s.BranchOverflowN += int(p.overflow) } @@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool { // our threshold for inline bucket size. var size = pageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) + size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) if inode.flags&bucketLeafFlag != 0 { return false @@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool { } // Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 +func (b *Bucket) maxInlineBucketSize() uintptr { + return uintptr(b.tx.db.pageSize / 4) } // write allocates and writes a bucket to a byte slice. diff --git a/vendor/github.com/etcd-io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go similarity index 99% rename from vendor/github.com/etcd-io/bbolt/cursor.go rename to vendor/go.etcd.io/bbolt/cursor.go index 3000aced6c..98aeb449a4 100644 --- a/vendor/github.com/etcd-io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -366,7 +366,7 @@ func (c *Cursor) node() *node { } for _, ref := range c.stack[:len(c.stack)-1] { _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) + n = n.childAt(ref.index) } _assert(n.isLeaf, "expected leaf node") return n diff --git a/vendor/github.com/etcd-io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go similarity index 99% rename from vendor/github.com/etcd-io/bbolt/db.go rename to vendor/go.etcd.io/bbolt/db.go index 870c8b1cc9..80b0095cc3 100644 --- a/vendor/github.com/etcd-io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -206,12 +206,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - db.path = path var err error - if db.file, err = db.openFile(db.path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { _ = db.close() return nil, err } + db.path = db.file.Name() // Lock file so that other processes using Bolt in read-write mode cannot // use the database at the same time. 
This would cause corruption since diff --git a/vendor/github.com/etcd-io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/doc.go rename to vendor/go.etcd.io/bbolt/doc.go diff --git a/vendor/github.com/etcd-io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go similarity index 100% rename from vendor/github.com/etcd-io/bbolt/errors.go rename to vendor/go.etcd.io/bbolt/errors.go diff --git a/vendor/github.com/etcd-io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go similarity index 90% rename from vendor/github.com/etcd-io/bbolt/freelist.go rename to vendor/go.etcd.io/bbolt/freelist.go index 587b8cc02d..d441b69256 100644 --- a/vendor/github.com/etcd-io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -2,6 +2,7 @@ package bbolt import ( "fmt" + "reflect" "sort" "unsafe" ) @@ -71,7 +72,7 @@ func (f *freelist) size() int { // The first element will be used to store the count. See freelist.write. n++ } - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) + return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) } // count returns count of pages on the freelist @@ -93,8 +94,24 @@ func (f *freelist) pending_count() int { return count } -// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// copyallunsafe copies a list of all free ids and all pending ids in one sorted list. // f.count returns the minimum length required for dst. +func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + fpgids := f.getFreePageIDs() + sz := len(fpgids) + len(m) + dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(dstptr), + Len: sz, + Cap: sz, + })) + mergepgids(dst, fpgids, m) +} + func (f *freelist) copyall(dst []pgid) { m := make(pgids, 0, f.pending_count()) for _, txp := range f.pending { @@ -267,17 +284,21 @@ func (f *freelist) read(p *page) { } // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) + var idx, count uintptr = 0, uintptr(p.count) if count == 0xFFFF { idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))) } // Copy the list of page ids from the freelist. 
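Aside: `copyallunsafe` above manufactures a `[]pgid` directly over the memory that follows the page header via `reflect.SliceHeader`, replacing the old cast through a huge fixed-size array type. A minimal sketch of that overlay pattern, with a plain byte buffer standing in for the mmap'd page (names here are illustrative):

```go
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

type pgid uint64

func main() {
	// Stand-in for the freelist page body that follows the header.
	buf := make([]byte, 4*unsafe.Sizeof(pgid(0)))

	// Overlay a []pgid on the raw bytes, as copyallunsafe does.
	// go vet flags this construction; it is only safe while the
	// backing memory stays alive and correctly aligned (bbolt's
	// pages are mmap'd, so both hold there).
	n := 4
	ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
		Data: uintptr(unsafe.Pointer(&buf[0])),
		Len:  n,
		Cap:  n,
	}))

	for i := range ids {
		ids[i] = pgid(100 + i)
	}
	fmt.Println(ids) // [100 101 102 103]
	runtime.KeepAlive(buf)
}
```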
if count == 0 { f.ids = nil } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)), + Len: int(count), + Cap: int(count), + })) // copy the ids, so we don't modify on the freelist page directly idsCopy := make([]pgid, count) @@ -315,11 +336,11 @@ func (f *freelist) write(p *page) error { p.count = uint16(lenids) } else if lenids < 0xFFFF { p.count = uint16(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) } else { p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) - f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + *(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids) + f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0)))) } return nil diff --git a/vendor/github.com/etcd-io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go similarity index 99% rename from vendor/github.com/etcd-io/bbolt/freelist_hmap.go rename to vendor/go.etcd.io/bbolt/freelist_hmap.go index 6a03a6c3c8..02ef2be044 100644 --- a/vendor/github.com/etcd-io/bbolt/freelist_hmap.go +++ b/vendor/go.etcd.io/bbolt/freelist_hmap.go @@ -27,7 +27,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.allocs[pid] = txid for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+pgid(i)) + delete(f.cache, pid+i) } return pid } diff --git a/vendor/go.etcd.io/bbolt/go.mod b/vendor/go.etcd.io/bbolt/go.mod new file mode 100644 index 0000000000..c2366daef6 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/go.mod @@ -0,0 +1,5 @@ +module go.etcd.io/bbolt + +go 1.12 + +require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 diff --git a/vendor/go.etcd.io/bbolt/go.sum b/vendor/go.etcd.io/bbolt/go.sum new file mode 100644 index 0000000000..4ad15a488c --- /dev/null +++ b/vendor/go.etcd.io/bbolt/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/etcd-io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go similarity index 91% rename from vendor/github.com/etcd-io/bbolt/node.go rename to vendor/go.etcd.io/bbolt/node.go index 6c3fa553ea..1690eef3f7 100644 --- a/vendor/github.com/etcd-io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -3,6 +3,7 @@ package bbolt import ( "bytes" "fmt" + "reflect" "sort" "unsafe" ) @@ -41,19 +42,19 @@ func (n *node) size() int { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) } - return sz + return int(sz) } // sizeLessThan returns true if the node is less than a given size. // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. 
-func (n *node) sizeLessThan(v int) bool { +func (n *node) sizeLessThan(v uintptr) bool { sz, elsz := pageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) + sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) if sz >= v { return false } @@ -62,7 +63,7 @@ func (n *node) sizeLessThan(v int) bool { } // pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { +func (n *node) pageElementSize() uintptr { if n.isLeaf { return leafPageElementSize } @@ -207,39 +208,39 @@ func (n *node) write(p *page) { } // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) for i, item := range n.inodes { _assert(len(item.key) > 0, "write: zero-length inode key") // Write the page element. if n.isLeaf { elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) elem.flags = item.flags elem.ksize = uint32(len(item.key)) elem.vsize = uint32(len(item.value)) } else { elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem))) elem.ksize = uint32(len(item.key)) elem.pgid = item.pgid _assert(elem.pgid != p.id, "write: circular dependency occurred") } - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } + sz := klen + vlen + b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: bp, + Len: sz, + Cap: sz, + })) + bp += uintptr(sz) // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] + l := copy(b, item.key) + copy(b[l:], item.value) } // DEBUG ONLY: n.dump() @@ -247,7 +248,7 @@ func (n *node) write(p *page) { // split breaks up a node into multiple smaller nodes, if appropriate. // This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { +func (n *node) split(pageSize uintptr) []*node { var nodes []*node node := n @@ -270,7 +271,7 @@ func (n *node) split(pageSize int) []*node { // splitTwo breaks up a node into two smaller nodes, if appropriate. // This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { +func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { @@ -312,18 +313,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) { // splitIndex finds the position where a page will fill a given threshold. // It returns the index as well as the size of the first page. // This is only be called from split(). 
-func (n *node) splitIndex(threshold int) (index, sz int) { +func (n *node) splitIndex(threshold int) (index, sz uintptr) { sz = pageHeaderSize // Loop until we only have the minimum number of keys required for the second page. for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i + index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { + if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -356,7 +357,7 @@ func (n *node) spill() error { n.children = nil // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) + var nodes = n.split(uintptr(tx.db.pageSize)) for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { @@ -587,9 +588,11 @@ func (n *node) dump() { type nodes []*node -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { + return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 +} // inode represents an internal node inside of a node. // It can be used to point to elements in a page or point diff --git a/vendor/github.com/etcd-io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go similarity index 71% rename from vendor/github.com/etcd-io/bbolt/page.go rename to vendor/go.etcd.io/bbolt/page.go index bca9615f0f..b5c1699789 100644 --- a/vendor/github.com/etcd-io/bbolt/page.go +++ b/vendor/go.etcd.io/bbolt/page.go @@ -3,16 +3,17 @@ package bbolt import ( "fmt" "os" + "reflect" "sort" "unsafe" ) -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) +const pageHeaderSize = unsafe.Sizeof(page{}) const minKeysPerPage = 2 -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) +const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) const ( branchPageFlag = 0x01 @@ -32,7 +33,6 @@ type page struct { flags uint16 count uint16 overflow uint32 - ptr uintptr } // typ returns a human readable page type string used for debugging. @@ -51,13 +51,13 @@ func (p *page) typ() string { // meta returns a pointer to the metadata section of the page. func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) + return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) } // leafPageElement retrieves the leaf node by index func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n + off := uintptr(index) * unsafe.Sizeof(leafPageElement{}) + return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) } // leafPageElements retrieves a list of leaf nodes. 
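Aside: with the `ptr uintptr` field removed from `page`, `pageHeaderSize` becomes `unsafe.Sizeof(page{})` and elements are located by plain pointer arithmetic from the page base. A self-contained sketch of that base + Sizeof(header) + i*Sizeof(elem) addressing; the `hdr` and `elem` types are stand-ins, not bbolt's:

```go
package main

import (
	"fmt"
	"unsafe"
)

type hdr struct { // stand-in for bbolt's page header
	id    uint64
	count uint16
	_     [6]byte // pad so elements start 8-byte aligned
}

type elem struct{ ksize, vsize uint32 } // stand-in for leafPageElement

// elemAt mirrors the patched leafPageElement: the i-th element lives
// at base + Sizeof(header) + i*Sizeof(elem).
func elemAt(p *hdr, i uint16) *elem {
	off := unsafe.Sizeof(*p) + uintptr(i)*unsafe.Sizeof(elem{})
	return (*elem)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + off))
}

func main() {
	// One contiguous buffer: header followed by two elements.
	buf := make([]byte, unsafe.Sizeof(hdr{})+2*unsafe.Sizeof(elem{}))
	p := (*hdr)(unsafe.Pointer(&buf[0]))
	p.count = 2
	elemAt(p, 0).ksize = 3
	elemAt(p, 1).vsize = 7
	fmt.Println(elemAt(p, 0).ksize, elemAt(p, 1).vsize) // 3 7
}
```

Doing the uintptr conversion and the conversion back to a pointer in a single expression keeps the pattern within the forms the `unsafe` package documents as valid.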
@@ -65,12 +65,17 @@ func (p *page) leafPageElements() []leafPageElement { if p.count == 0 { return nil } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] + return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), + Len: int(p.count), + Cap: int(p.count), + })) } // branchPageElement retrieves the branch node by index func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] + off := uintptr(index) * unsafe.Sizeof(branchPageElement{}) + return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off)) } // branchPageElements retrieves a list of branch nodes. @@ -78,12 +83,20 @@ func (p *page) branchPageElements() []branchPageElement { if p.count == 0 { return nil } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] + return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p), + Len: int(p.count), + Cap: int(p.count), + })) } // dump writes n bytes of the page to STDERR as hex output. func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)), + Len: n, + Cap: n, + })) fmt.Fprintf(os.Stderr, "%x\n", buf) } @@ -102,8 +115,11 @@ type branchPageElement struct { // key returns a byte slice of the node key. func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), + Len: int(n.ksize), + Cap: int(n.ksize), + })) } // leafPageElement represents a node on a leaf page. @@ -116,14 +132,20 @@ type leafPageElement struct { // key returns a byte slice of the node key. func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos), + Len: int(n.ksize), + Cap: int(n.ksize), + })) } // value returns a byte slice of the node value. func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize), + Len: int(n.vsize), + Cap: int(n.vsize), + })) } // PageInfo represents human readable information about a page. diff --git a/vendor/github.com/etcd-io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go similarity index 98% rename from vendor/github.com/etcd-io/bbolt/tx.go rename to vendor/go.etcd.io/bbolt/tx.go index 2df7688c2f..13937cdbf6 100644 --- a/vendor/github.com/etcd-io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "reflect" "sort" "strings" "time" @@ -527,7 +528,7 @@ func (tx *Tx) write() error { offset := int64(p.id) * int64(tx.db.pageSize) // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + ptr := uintptr(unsafe.Pointer(p)) for { // Limit our write to our max allocation size. 
sz := size @@ -536,7 +537,11 @@ func (tx *Tx) write() error { } // Write chunk to disk. - buf := ptr[:sz] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: ptr, + Len: sz, + Cap: sz, + })) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { return err } @@ -552,7 +557,7 @@ func (tx *Tx) write() error { // Otherwise move offset forward and move pointer to next chunk. offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + ptr += uintptr(sz) } } @@ -571,7 +576,11 @@ func (tx *Tx) write() error { continue } - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: uintptr(unsafe.Pointer(p)), + Len: tx.db.pageSize, + Cap: tx.db.pageSize, + })) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go new file mode 100644 index 0000000000..e07899b909 --- /dev/null +++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeheader contains header declarations for the Go runtime's +// slice and string implementations. +// +// This package allows x/sys to use types equivalent to +// reflect.SliceHeader and reflect.StringHeader without introducing +// a dependency on the (relatively heavy) "reflect" package. +package unsafeheader + +import ( + "unsafe" +) + +// Slice is the runtime representation of a slice. +// It cannot be used safely or portably and its representation may change in a later release. +type Slice struct { + Data unsafe.Pointer + Len int + Cap int +} + +// String is the runtime representation of a string. +// It cannot be used safely or portably and its representation may change in a later release. +type String struct { + Data unsafe.Pointer + Len int +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index ab433ccfbb..579d2d7355 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -89,7 +89,7 @@ constants. Adding new syscall numbers is mostly done by running the build on a sufficiently new installation of the target OS (or updating the source checkouts for the -new build system). However, depending on the OS, you make need to update the +new build system). However, depending on the OS, you may need to update the parsing in mksysnum. ### mksyscall.go @@ -163,7 +163,7 @@ The merge is performed in the following steps: ## Generated files -### `zerror_${GOOS}_${GOARCH}.go` +### `zerrors_${GOOS}_${GOARCH}.go` A file containing all of the system's generated error numbers, error strings, signal numbers, and constants. Generated by `mkerrors.sh` (see above). 
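Aside: the new `internal/unsafeheader` package gives x/sys slice and string headers whose `Data` field is an `unsafe.Pointer` (unlike `reflect.SliceHeader`'s `uintptr`, which the garbage collector does not trace) without pulling in the `reflect` package. Because the package is internal to x/sys it cannot be imported from outside, so this sketch declares an equivalent local type; `bytesAt` is illustrative:

```go
package main

import (
	"fmt"
	"unsafe"
)

// sliceHeader matches unsafeheader.Slice: unlike reflect.SliceHeader,
// Data is an unsafe.Pointer, so the referent stays visible to the GC.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

// bytesAt wraps length bytes at p in a []byte without copying,
// the way x/sys now wraps mmap'd or kernel-filled memory.
func bytesAt(p unsafe.Pointer, length int) []byte {
	var b []byte
	h := (*sliceHeader)(unsafe.Pointer(&b))
	h.Data = p
	h.Cap = length
	h.Len = length
	return b
}

func main() {
	src := [4]byte{'b', 'o', 'l', 't'}
	b := bytesAt(unsafe.Pointer(&src[0]), len(src))
	fmt.Printf("%s\n", b) // bolt
}
```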
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index bc076cf629..780e387e3f 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -187,6 +187,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -200,6 +201,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -479,12 +481,13 @@ ccflags="$@" $2 ~ /^(MS|MNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || $2 ~ /^MODULE_INIT_/ || $2 !~ "NLA_TYPE_MASK" && + $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -506,7 +509,8 @@ ccflags="$@" $2 ~ /^CAP_/ || $2 ~ /^ALG_/ || $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || - $2 ~ /^FS_IOC_.*ENCRYPTION/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_VERITY_/ || $2 ~ /^FSCRYPT_/ || $2 ~ /^GRND_/ || $2 ~ /^RND/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index f911617be9..dc0befee37 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -6,7 +6,11 @@ package unix -import "unsafe" +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) //sys closedir(dir uintptr) (err error) //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) @@ -71,6 +75,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { cnt++ continue } + reclen := int(entry.Reclen) if reclen > len(buf) { // Not enough room. Return for now. @@ -79,13 +84,15 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // restarting is O(n^2) in the length of the directory. Oh well. break } + // Copy entry into return buffer. 
- s := struct { - ptr unsafe.Pointer - siz int - cap int - }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen} - copy(buf, *(*[]byte)(unsafe.Pointer(&s))) + var s []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&entry) + hdr.Cap = reclen + hdr.Len = reclen + copy(buf, s) + buf = buf[reclen:] n += reclen cnt++ diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 9a5a6ee544..0cf31acf02 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -423,6 +423,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tp *Timeval) (err error) //sysnb Getuid() (uid int) //sysnb Issetugid() (tainted bool) //sys Kqueue() (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 707ba4f59a..2724e3a512 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index fdbfb5911a..ce2e0d2497 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index f8bc4cfb1f..fc17a3f232 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -20,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 5ede3ac316..1e91ddf325 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -22,17 +22,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index bbe1abbcee..942a4bbf74 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1633,6 +1633,15 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, flags int) (err error) //sys Dup(oldfd int) (fd int, err error) + +func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. + if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +} + //sys Dup3(oldfd int, newfd int, flags int) (err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) @@ -1757,6 +1766,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -2178,7 +2190,6 @@ func Klogset(typ int, arg int) (err error) { // TimerGetoverrun // TimerGettime // TimerSettime -// Timerfd // Tkill (obsolete) // Tuxcall // Umount2 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index a8374b67cf..048d18e3c8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -49,7 +49,7 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
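Aside: the exported `Dup2` wrapper added above routes to `Dup3(oldfd, newfd, 0)` on android, arm64 and riscv64, where plain dup2 is blocked or absent, and the per-arch `//sys Dup2` stubs in the files below are correspondingly demoted to an unexported `dup2`. A minimal Linux-only usage sketch (the log path is arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/redirected.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// With this patch, Dup2 is portable across Linux targets: on
	// android, arm64 and riscv64 it transparently becomes
	// Dup3(oldfd, newfd, 0).
	if err := unix.Dup2(int(f.Fd()), 1); err != nil {
		panic(err)
	}
	fmt.Println("this line lands in /tmp/redirected.log")
}
```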
-//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 8ed1d546f0..72efe86ed4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -6,7 +6,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 99ae613733..e1913e2c93 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -80,7 +80,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 807a0b20c3..c6de6b9134 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -25,7 +25,7 @@ func EpollCreate(size int) (fd int, err error) { //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 @@ -47,7 +47,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -168,6 +168,24 @@ func Pipe2(p []int, flags int) (err error) { return } +// Getrlimit prefers the prlimit64 system call. See issue 38604. +func Getrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + return getrlimit(resource, rlim) +} + +// Setrlimit prefers the prlimit64 system call. See issue 38604. 
+func Setrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + return setrlimit(resource, rlim) +} + func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } @@ -192,9 +210,9 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. +func dup2(oldfd int, newfd int) error func Pause() error { _, err := ppoll(nil, 0, nil, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index af77e6e25e..f0287476cd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index e286c6ba31..c11328111d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -14,7 +14,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index ca0345aabf..349374409b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index abdabbac3f..b0b1505565 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -191,10 +191,6 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err @@ -228,3 +224,7 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. 
+func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 533e9305e7..2363f74991 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -10,7 +10,7 @@ import ( "unsafe" ) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index d890a227bf..d389f1518f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -8,7 +8,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8f710d0140..400ba9fbc9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -12,6 +12,8 @@ import ( "sync" "syscall" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -113,15 +115,12 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, errno } - // Slice memory layout - var sl = struct { - addr uintptr - len int - cap int - }{addr, length, length} - - // Use unsafe to turn sl into a []byte. - b := *(*[]byte)(unsafe.Pointer(&sl)) + // Use unsafe to convert addr into a []byte. + var b []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) + hdr.Data = unsafe.Pointer(addr) + hdr.Cap = length + hdr.Len = length // Register mapping in m and return it. 
p := &b[cap(b)-1] diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 84c599c524..6e3cfec46c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -216,6 +216,7 @@ const ( BPF_F_RDONLY = 0x8 BPF_F_RDONLY_PROG = 0x80 BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REPLACE = 0x4 BPF_F_REUSE_STACKID = 0x400 BPF_F_SEQ_NUMBER = 0x8 BPF_F_SKIP_FIELD_MASK = 0xff @@ -389,6 +390,7 @@ const ( CLONE_NEWNET = 0x40000000 CLONE_NEWNS = 0x20000 CLONE_NEWPID = 0x20000000 + CLONE_NEWTIME = 0x80 CLONE_NEWUSER = 0x10000000 CLONE_NEWUTS = 0x4000000 CLONE_PARENT = 0x8000 @@ -671,6 +673,7 @@ const ( FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 + FS_IOC_MEASURE_VERITY = 0xc0046686 FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 FS_KEY_DESCRIPTOR_SIZE = 0x8 @@ -683,6 +686,9 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0xf + FS_VERITY_FL = 0x100000 + FS_VERITY_HASH_ALG_SHA256 = 0x1 + FS_VERITY_HASH_ALG_SHA512 = 0x2 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 @@ -733,6 +739,7 @@ const ( GENL_NAMSIZ = 0x10 GENL_START_ALLOC = 0x13 GENL_UNS_ADMIN_PERM = 0x10 + GRND_INSECURE = 0x4 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 HDIO_DRIVE_CMD = 0x31f @@ -1483,6 +1490,7 @@ const ( PR_GET_FPEMU = 0x9 PR_GET_FPEXC = 0xb PR_GET_FP_MODE = 0x2e + PR_GET_IO_FLUSHER = 0x3a PR_GET_KEEPCAPS = 0x7 PR_GET_NAME = 0x10 PR_GET_NO_NEW_PRIVS = 0x27 @@ -1518,6 +1526,7 @@ const ( PR_SET_FPEMU = 0xa PR_SET_FPEXC = 0xc PR_SET_FP_MODE = 0x2d + PR_SET_IO_FLUSHER = 0x39 PR_SET_KEEPCAPS = 0x8 PR_SET_MM = 0x23 PR_SET_MM_ARG_END = 0x9 @@ -1746,12 +1755,15 @@ const ( RTM_DELRULE = 0x21 RTM_DELTCLASS = 0x29 RTM_DELTFILTER = 0x2d + RTM_DELVLAN = 0x71 RTM_F_CLONED = 0x200 RTM_F_EQUALIZE = 0x400 RTM_F_FIB_MATCH = 0x2000 RTM_F_LOOKUP_TABLE = 0x1000 RTM_F_NOTIFY = 0x100 + RTM_F_OFFLOAD = 0x4000 RTM_F_PREFIX = 0x800 + RTM_F_TRAP = 0x8000 RTM_GETACTION = 0x32 RTM_GETADDR = 0x16 RTM_GETADDRLABEL = 0x4a @@ -1773,7 +1785,8 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6f + RTM_GETVLAN = 0x72 + RTM_MAX = 0x73 RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1788,6 +1801,7 @@ const ( RTM_NEWNETCONF = 0x50 RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -1795,8 +1809,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x18 - RTM_NR_MSGTYPES = 0x60 + RTM_NR_FAMILIES = 0x19 + RTM_NR_MSGTYPES = 0x64 RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2086,7 +2100,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 + TASKSTATS_VERSION = 0xa TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -2151,6 +2165,8 @@ const ( TCP_USER_TIMEOUT = 0x12 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 + TFD_TIMER_ABSTIME = 0x1 + TFD_TIMER_CANCEL_ON_SET = 0x2 TIMER_ABSTIME = 0x1 TIOCM_DTR = 0x2 TIOCM_LE = 0x1 @@ -2267,7 +2283,7 @@ const ( VMADDR_CID_ANY = 0xffffffff VMADDR_CID_HOST = 0x2 VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 + VMADDR_CID_LOCAL = 0x1 VMADDR_PORT_ANY = 0xffffffff VM_SOCKETS_INVALID_VERSION = 0xffffffff VQUIT = 0x1 @@ -2394,6 +2410,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 
XFS_SUPER_MAGIC = 0x58465342 Z3FOLD_MAGIC = 0x33 + ZONEFS_MAGIC = 0x5a4f4653 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 0876cf92ff..5e974110d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -73,6 +73,8 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -340,6 +342,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index d5be2e8377..47a57fe468 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -73,6 +73,8 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -341,6 +343,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index fbeef83252..df2eea4bb7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -347,6 +349,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 06daa50ebd..4e1214217f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -75,6 +75,8 @@ const ( FFDLY = 0x8000 FLUSHO = 0x1000 FPSIMD_MAGIC = 0x46508001 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -334,6 +336,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 7c866b8f5b..a23b08029a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +339,8 @@ const ( 
TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index c42966d19c..a5a921e43b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +339,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index a5b2b42739..d088e197bd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +339,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f91881b81..0ddf9d5fe8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -337,6 +339,8 @@ const ( TCSETSW = 0x540f TCSETSW2 = 0x8030542c TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 TIOCCBRK = 0x5428 TIOCCONS = 0x80047478 TIOCEXCL = 0x740d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 63df35597e..a93ffc1807 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -72,6 +72,8 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -391,6 +393,8 @@ const ( TCSETSF = 0x802c7416 TCSETSW = 0x802c7415 TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 7ab68f7c8a..c1ea48b95f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -72,6 +72,8 @@ const ( FF1 = 0x4000 FFDLY = 0x4000 FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -391,6 +393,8 @@ const ( TCSETSF = 0x802c7416 TCSETSW = 0x802c7415 TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index f99cf1b9e0..7def950ba5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -328,6 +330,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 613ee237e3..d39293c871 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -72,6 +72,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 @@ -401,6 +403,8 @@ const ( TCSETXF = 0x5434 TCSETXW = 0x5435 TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 TIOCCBRK = 0x5428 TIOCCONS = 0x541d TIOCEXCL = 0x540c diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1f7a68d5cc..3ff3ec681b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -76,6 +76,8 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 @@ -390,6 +392,8 @@ const ( TCSETSW = 0x8024540a TCSETSW2 = 0x802c540e TCXONC = 0x20005406 + TFD_CLOEXEC = 0x400000 + TFD_NONBLOCK = 0x4000 TIOCCBRK = 0x2000747a TIOCCONS = 0x20007424 TIOCEXCL = 0x2000740d diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c1cc0a415f..23e94d3663 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = 
int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index a3fc490041..e2ffb3bed3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index f8e5c37c5c..102561730a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 50d6437e6b..c67e336e2a 100644 
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index cea04e041c..d34e6df2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 63103950ca..b759757a77 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func 
libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 8c3bb3a25d..8d39a09f72 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -966,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1682,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index a8709f72dd..b288612600 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -1376,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2342,23 +2357,6 @@ func writelen(fd int, buf *byte, 
nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index fd2dae8e57..df217825f0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1450,6 +1450,37 @@ func Sysinfo(info *Sysinfo_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func TimerfdCreate(clockid int, flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_TIMERFD_CREATE, uintptr(clockid), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdGettime(fd int, currValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall(SYS_TIMERFD_GETTIME, uintptr(fd), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall6(SYS_TIMERFD_SETTIME, uintptr(fd), uintptr(flags), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index ba63af7b08..19ebd3ff75 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -55,7 +55,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index f64adef415..5c562182a1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index ac19523e88..dc69d99c61 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -234,7 +234,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f0d2890b16..1b897dee05 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -151,7 +151,7 @@ func Getgid() (gid int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrlimit(resource int, rlim *Rlimit) (err error) { +func getrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) @@ -307,7 +307,7 @@ func Setresuid(ruid int, euid int, suid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setrlimit(resource int, rlim *Rlimit) (err error) { +func setrlimit(resource int, rlim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index aecbbca754..49186843ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 424fb7fb60..9171d3bd2a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 28c7239cf6..82286f04f9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 
84596b300a..15920621c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index de022639d6..73a42e2ccb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 888f21d37a..6b85595366 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 9bc353f0c4..d7032ab1e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -45,7 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 854e816d67..bcbbdd906e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -72,7 +72,7 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 7aae554f21..54559a8956 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -431,4 +431,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 7968439a92..054a741b7f 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -353,4 +353,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3c663c69d4..307f2ba12e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1f3b4d150f..e9404dd545 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -298,4 +298,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 00da3de907..68bb6d29b8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -416,4 +416,6 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index d404fbd4d4..4e5251185f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -346,4 +346,6 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index bfbf242f33..4d9aa3003b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -346,4 +346,6 @@ const ( SYS_FSPICK = 5433 SYS_PIDFD_OPEN = 5434 SYS_CLONE3 = 5435 + SYS_OPENAT2 = 5437 + SYS_PIDFD_GETFD = 5438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 3826f497ad..64af0707d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -416,4 +416,6 @@ const ( SYS_FSPICK = 4433 SYS_PIDFD_OPEN = 4434 SYS_CLONE3 = 4435 + SYS_OPENAT2 = 4437 + SYS_PIDFD_GETFD = 4438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 52e3da6490..cc3c067ba3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 6141f90a82..4050ff9836 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -395,4 +395,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 4f7261a884..529abb6a7f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -297,4 +297,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index f47014ac05..2766500109 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -360,4 +360,6 @@ const ( SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 SYS_CLONE3 = 435 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index dd78abb0d6..4dc82bb249 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -374,4 +374,6 @@ const ( SYS_FSMOUNT = 432 SYS_FSPICK = 433 SYS_PIDFD_OPEN = 434 + SYS_OPENAT2 = 437 + SYS_PIDFD_GETFD = 438 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index cb5e06c605..416f7767e7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -18,6 +18,11 @@ type ( _C_long_long int64 ) +type ItimerSpec struct { + Interval Timespec + Value Timespec +} + const ( TIME_OK = 0x0 TIME_INS = 0x1 @@ -114,7 +119,8 @@ type FscryptKeySpecifier struct { type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 - _ [9]uint32 + Key_id uint32 + _ [8]uint32 } type FscryptRemoveKeyArg struct { @@ -479,7 +485,7 @@ const ( IFLA_NEW_IFINDEX = 0x31 IFLA_MIN_MTU = 0x32 IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x35 + IFLA_MAX = 0x36 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 IFLA_INFO_XSTATS = 0x3 @@ -2291,3 +2297,49 @@ const ( DEVLINK_DPIPE_HEADER_IPV4 = 0x1 DEVLINK_DPIPE_HEADER_IPV6 = 0x2 ) + +type FsverityDigest struct { + Algorithm uint16 + Size uint16 +} + +type FsverityEnableArg struct { + Version uint32 + Hash_algorithm uint32 + Block_size uint32 + Salt_size uint32 + Salt_ptr uint64 + Sig_size uint32 + _ uint32 + Sig_ptr uint64 + _ [11]uint64 +} + +type Nhmsg struct { + Family uint8 + Scope uint8 + Protocol uint8 + Resvd uint8 + Flags uint32 +} + +type NexthopGrp struct { + Id uint32 + Weight uint8 + Resvd1 uint8 + Resvd2 uint16 +} + +const ( + NHA_UNSPEC = 0x0 + NHA_ID = 0x1 + NHA_GROUP = 0x2 + NHA_GROUP_TYPE = 0x3 + NHA_BLACKHOLE = 0x4 + NHA_OIF = 0x5 + NHA_GATEWAY = 0x6 + NHA_ENCAP_TYPE = 0x7 + NHA_ENCAP = 0x8 + NHA_GROUPS = 0x9 + NHA_MASTER = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index fc6b3fb5c4..761b67c864 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 26c30b84d0..201fb3482d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -298,6 +298,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + 
Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 814d42d543..8051b56108 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -276,6 +276,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index d9664c7135..a936f21692 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -277,6 +277,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 0d721454f5..aaca03dd7d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -281,6 +281,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ef697684d1..2e7f3b8ca4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -280,6 +280,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 485fda70be..16add5a257 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -280,6 +280,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 569477eef8..4ed2c8e54c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -281,6 +281,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 602d8b4eed..7415190997 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 6db9a7b737..046c2debd4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -287,6 +287,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 52b5348c2e..0f2f61a6ad 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -305,6 +305,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index a111387b3a..cca1b6be27 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -300,6 +300,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 8153af1818..33a73bf183 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -282,6 +282,7 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } type cpuMask uint64 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index d777113415..82076fb74f 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -104,6 +104,35 @@ func (d *DLL) MustFindProc(name string) *Proc { return p } +// FindProcByOrdinal searches DLL d for procedure by ordinal and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) { + a, e := GetProcAddressByOrdinal(d.Handle, ordinal) + name := "#" + itoa(int(ordinal)) + if e != nil { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails. +func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc { + p, e := d.FindProcByOrdinal(ordinal) + if e != nil { + panic(e) + } + return p +} + // Release unloads DLL d from memory. func (d *DLL) Release() (err error) { return FreeLibrary(d.Handle) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 83b65a6661..e207c69249 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -106,6 +106,12 @@ func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { return nil, err } + // Treat nil message interface as an empty message, + // in which case there is nothing to output. 
+ if m == nil { + return []byte{}, nil + } + enc := encoder{internalEnc, o} err = enc.marshalMessage(m.ProtoReflect(), false) if err != nil { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 4cb8d6dfd4..e7af0fe0de 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -106,7 +106,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var descriptorAccessors = map[reflect.Type][]string{ reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "IsPacked", "IsExtension", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go index 1629515493..6e37b59e92 100644 --- a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go @@ -61,16 +61,17 @@ const ( // Field numbers for google.protobuf.FieldDescriptorProto. 
const ( - FieldDescriptorProto_Name = 1 // optional string - FieldDescriptorProto_Number = 3 // optional int32 - FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label - FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type - FieldDescriptorProto_TypeName = 6 // optional string - FieldDescriptorProto_Extendee = 2 // optional string - FieldDescriptorProto_DefaultValue = 7 // optional string - FieldDescriptorProto_OneofIndex = 9 // optional int32 - FieldDescriptorProto_JsonName = 10 // optional string - FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions + FieldDescriptorProto_Name = 1 // optional string + FieldDescriptorProto_Number = 3 // optional int32 + FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label + FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type + FieldDescriptorProto_TypeName = 6 // optional string + FieldDescriptorProto_Extendee = 2 // optional string + FieldDescriptorProto_DefaultValue = 7 // optional string + FieldDescriptorProto_OneofIndex = 9 // optional int32 + FieldDescriptorProto_JsonName = 10 // optional string + FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions + FieldDescriptorProto_Proto3Optional = 17 // optional bool ) // Field numbers for google.protobuf.OneofDescriptorProto. diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go index 1cb2d74fc6..517c4e2a04 100644 --- a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go +++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go @@ -30,9 +30,9 @@ func Less(a, b protoreflect.FieldDescriptor) bool { return a.Number() < b.Number() } return oa.Index() < ob.Index() - case oa != nil: + case oa != nil && !oa.IsSynthetic(): return false - case ob != nil: + case ob != nil && !ob.IsSynthetic(): return true default: return a.Number() < b.Number() diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index a9fe07c216..13b7723c6c 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -77,7 +77,7 @@ func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.L2.Locations } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } func (fd *File) ProtoType(pref.FileDescriptor) {} func (fd *File) ProtoInternal(pragma.DoNotImplement) {} @@ -202,20 +202,21 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind - JSONName jsonName - IsWeak bool // promoted from google.protobuf.FieldOptions - HasPacked bool // promoted from google.protobuf.FieldOptions - IsPacked bool // promoted from google.protobuf.FieldOptions - HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions - EnforceUTF8 bool // promoted from 
google.protobuf.FieldOptions - Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor } Oneof struct { @@ -277,6 +278,12 @@ func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} func (fd *Field) IsPacked() bool { if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { switch fd.L1.Kind { @@ -338,6 +345,9 @@ func (fd *Field) EnforceUTF8() bool { return fd.L0.ParentFile.L1.Syntax == pref.Proto3 } +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} func (od *Oneof) Options() pref.ProtoMessage { if f := od.L1.Options; f != nil { return f() @@ -361,12 +371,13 @@ type ( Kind pref.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage - JSONName jsonName - IsPacked bool // promoted from google.protobuf.FieldOptions - Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Options func() pref.ProtoMessage + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor } ) @@ -376,11 +387,15 @@ func (xd *Extension) Options() pref.ProtoMessage { } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } -func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } +func (xd *Extension) JSONName() string { return 
xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index cdf3164686..bc215944a3 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -441,6 +441,8 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des panic("oneof type already set") } fd.L1.ContainingOneof = od + case fieldnum.FieldDescriptorProto_Proto3Optional: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -537,6 +539,13 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { num, typ, n := protowire.ConsumeTag(b) b = b[n:] switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Proto3Optional: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) b = b[m:] diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 2c43b11734..ff198d0a15 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -5078,6 +5078,46 @@ var coderStringPtr = pointerCoderFuncs{ merge: mergeStringPtr, } +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + // sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. 
func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { s := *p.StringSlice() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 370ec65a20..29ed59b445 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -31,6 +31,11 @@ type coderMessageInfo struct { needsInitCheck bool isMessageSet bool numRequiredFields uint8 + + // Include space for a number of coderFieldInfos to improve cache locality. + // The number of entries is chosen through a combination of guesswork and + // empirical testing. + coderFieldBuf [32]coderFieldInfo } type coderFieldInfo struct { @@ -53,11 +58,13 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.coderFields = make(map[protowire.Number]*coderFieldInfo) fields := mi.Desc.Fields() + preallocFields := mi.coderFieldBuf[:] for i := 0; i < fields.Len(); i++ { fd := fields.Get(i) fs := si.fieldsByNumber[fd.Number()] - if fd.ContainingOneof() != nil { + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { fs = si.oneofsByName[fd.ContainingOneof().Name()] } ft := fs.Type @@ -71,7 +78,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { var funcs pointerCoderFuncs var childMessage *MessageInfo switch { - case fd.ContainingOneof() != nil: + case isOneof: fieldOffset = offsetOf(fs, mi.Exporter) case fd.IsWeak(): fieldOffset = si.weakOffset @@ -80,7 +87,14 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { fieldOffset = offsetOf(fs, mi.Exporter) childMessage, funcs = fieldCoder(fd, ft) } - cf := &coderFieldInfo{ + var cf *coderFieldInfo + if len(preallocFields) > 0 { + cf = &preallocFields[0] + preallocFields = preallocFields[1:] + } else { + cf = new(coderFieldInfo) + } + *cf = coderFieldInfo{ num: fd.Number(), offset: fieldOffset, wiretag: wiretag, @@ -89,17 +103,16 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: (fd.Cardinality() == pref.Repeated || - fd.Kind() == pref.MessageKind || - fd.Kind() == pref.GroupKind || - fd.Syntax() != pref.Proto3), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), isRequired: fd.Cardinality() == pref.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf } for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { - mi.initOneofFieldCoders(oneofs.Get(i), si) + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } } if messageset.IsMessageSet(mi.Desc) { if !mi.extensionOffset.IsValid() { @@ -123,7 +136,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { } mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) for _, cf := range mi.orderedCoderFields { - if int(cf.num) > len(mi.denseCoderFields) { + if int(cf.num) >= len(mi.denseCoderFields) { break } mi.denseCoderFields[cf.num] = cf diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index c934c8d3ca..e899712388 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -338,6 +338,9 @@ func fieldCoder(fd pref.FieldDescriptor, ft 
reflect.Type) (*MessageInfo, pointer return nil, coderDoublePtr } case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } if ft.Kind() == reflect.String { return nil, coderStringPtr } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 9fc384a7a5..36a90dff38 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -162,7 +162,7 @@ func (c *boolConverter) IsValidPB(v pref.Value) bool { return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *boolConverter) New() pref.Value { return c.def } func (c *boolConverter) Zero() pref.Value { return c.def } @@ -186,7 +186,7 @@ func (c *int32Converter) IsValidPB(v pref.Value) bool { return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *int32Converter) New() pref.Value { return c.def } func (c *int32Converter) Zero() pref.Value { return c.def } @@ -210,7 +210,7 @@ func (c *int64Converter) IsValidPB(v pref.Value) bool { return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *int64Converter) New() pref.Value { return c.def } func (c *int64Converter) Zero() pref.Value { return c.def } @@ -234,7 +234,7 @@ func (c *uint32Converter) IsValidPB(v pref.Value) bool { return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *uint32Converter) New() pref.Value { return c.def } func (c *uint32Converter) Zero() pref.Value { return c.def } @@ -258,7 +258,7 @@ func (c *uint64Converter) IsValidPB(v pref.Value) bool { return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *uint64Converter) New() pref.Value { return c.def } func (c *uint64Converter) Zero() pref.Value { return c.def } @@ -282,7 +282,7 @@ func (c *float32Converter) IsValidPB(v pref.Value) bool { return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *float32Converter) New() pref.Value { return c.def } func (c *float32Converter) Zero() pref.Value { return c.def } @@ -306,7 +306,7 @@ func (c *float64Converter) IsValidPB(v pref.Value) bool { return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *float64Converter) New() pref.Value { return c.def } func (c *float64Converter) Zero() pref.Value { return c.def } @@ -336,7 +336,7 @@ func (c *stringConverter) IsValidPB(v pref.Value) bool { return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *stringConverter) New() pref.Value { return c.def } func (c *stringConverter) Zero() pref.Value { return c.def } @@ -363,7 +363,7 @@ func (c *bytesConverter) IsValidPB(v pref.Value) bool { return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c 
*bytesConverter) New() pref.Value { return c.def } func (c *bytesConverter) Zero() pref.Value { return c.def } @@ -400,7 +400,7 @@ func (c *enumConverter) IsValidPB(v pref.Value) bool { } func (c *enumConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *enumConverter) New() pref.Value { @@ -455,7 +455,7 @@ func (c *messageConverter) IsValidPB(v pref.Value) bool { } func (c *messageConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *messageConverter) New() pref.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index fe9384aba3..6fccab520e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -22,7 +22,7 @@ func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { } type listConverter struct { - goType reflect.Type + goType reflect.Type // []T c Converter } @@ -48,11 +48,11 @@ func (c *listConverter) IsValidPB(v pref.Value) bool { if !ok { return false } - return list.v.Type().Elem() == c.goType && list.IsValid() + return list.v.Type().Elem() == c.goType } func (c *listConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *listConverter) New() pref.Value { @@ -64,7 +64,7 @@ func (c *listConverter) Zero() pref.Value { } type listPtrConverter struct { - goType reflect.Type + goType reflect.Type // *[]T c Converter } @@ -88,7 +88,7 @@ func (c *listPtrConverter) IsValidPB(v pref.Value) bool { } func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *listPtrConverter) New() pref.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 3ef36d3f23..de06b2593f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -12,7 +12,7 @@ import ( ) type mapConverter struct { - goType reflect.Type + goType reflect.Type // map[K]V keyConv, valConv Converter } @@ -43,11 +43,11 @@ func (c *mapConverter) IsValidPB(v pref.Value) bool { if !ok { return false } - return mapv.v.Type() == c.goType && mapv.IsValid() + return mapv.v.Type() == c.goType } func (c *mapConverter) IsValidGo(v reflect.Value) bool { - return v.Type() == c.goType + return v.IsValid() && v.Type() == c.goType } func (c *mapConverter) New() pref.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index 94f45725f0..c3d741c2f0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -7,14 +7,12 @@ package impl import ( "encoding/binary" "encoding/json" - "fmt" "hash/crc32" "math" "reflect" "google.golang.org/protobuf/internal/errors" pref "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" piface "google.golang.org/protobuf/runtime/protoiface" ) @@ -92,13 +90,3 @@ func (Export) CompressGZIP(in []byte) (out []byte) { out = append(out, gzipFooter[:]...) 
return out } - -// WeakNil returns a typed nil pointer to a concrete message. -// It panics if the message is not linked into the binary. -func (Export) WeakNil(s pref.FullName) piface.MessageV1 { - mt, err := protoregistry.GlobalTypes.FindMessageByName(s) - if err != nil { - panic(fmt.Sprintf("weak message %v is not linked in", s)) - } - return mt.Zero().Interface().(piface.MessageV1) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 93c318f9ea..61757ce50a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -155,6 +155,8 @@ func (x placeholderExtension) Cardinality() pref.Cardinality { retu func (x placeholderExtension) Kind() pref.Kind { return 0 } func (x placeholderExtension) HasJSONName() bool { return false } func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index c1d890233c..7dd994bd95 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -15,7 +15,6 @@ import ( "google.golang.org/protobuf/internal/genname" "google.golang.org/protobuf/reflect/protoreflect" pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -109,7 +108,7 @@ func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { type ( SizeCache = int32 - WeakFields = map[int32]piface.MessageV1 + WeakFields = map[int32]protoreflect.ProtoMessage UnknownFields = []byte ExtensionFields = map[int32]ExtensionField ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index aac55ee8d8..3eb2b1390f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -53,7 +53,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fs := si.fieldsByNumber[fd.Number()] var fi fieldInfo switch { - case fd.ContainingOneof() != nil: + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) case fd.IsMap(): fi = fieldInfoForMap(fd, fs, mi.Exporter) @@ -72,7 +72,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.oneofs = map[pref.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) - mi.oneofs[od.Name()] = makeOneofInfo(od, si.oneofsByName[od.Name()], mi.Exporter, si.oneofWrappersByType) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) } mi.denseFields = make([]*fieldInfo, fds.Len()*2) @@ -84,7 +84,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { for i := 0; i < fds.Len(); { fd := fds.Get(i) - if od := fd.ContainingOneof(); od != 
nil { + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) i += od.Fields().Len() } else { @@ -170,6 +170,8 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { return x.Value().List().Len() > 0 case xd.IsMap(): return x.Value().Map().Len() > 0 + case xd.Message() != nil: + return x.Value().Message().IsValid() } return true } @@ -186,15 +188,28 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { return xt.Zero() } func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { - if !xt.IsValidValue(v) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) } + if *m == nil { *m = make(map[int32]ExtensionField) } var x ExtensionField x.Set(xt, v) - (*m)[int32(xt.TypeDescriptor().Number())] = x + (*m)[int32(xd.Number())] = x } func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { xd := xt.TypeDescriptor() diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 7b87d4712f..ea6f75548a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -221,7 +221,7 @@ var ( func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type - nullable := fd.Syntax() == pref.Proto2 + nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 if nullable { if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { @@ -290,9 +290,9 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv.Set(conv.GoValueOf(v)) if isBytes && rv.Len() == 0 { if nullable { - rv.Set(emptyBytes) // preserve presence in proto2 + rv.Set(emptyBytes) // preserve presence } else { - rv.Set(nilBytes) // do not preserve presence in proto3 + rv.Set(nilBytes) // do not preserve presence } } }, @@ -426,11 +426,25 @@ type oneofInfo struct { which func(pointer) pref.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, fs reflect.StructField, x exporter, wrappersByType map[reflect.Type]pref.FieldNumber) *oneofInfo { - fieldOffset := offsetOf(fs, x) - return &oneofInfo{ - oneofDesc: od, - which: func(p pointer) pref.FieldNumber { +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { if p.IsNil() { return 0 } @@ -442,7 +456,8 @@ func makeOneofInfo(od pref.OneofDescriptor, fs reflect.StructField, x exporter, if rv.IsNil() { return 0 } - return wrappersByType[rv.Type().Elem()] - }, + return si.oneofWrappersByType[rv.Type().Elem()] + } } + return oi } 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index 39d62fd068..57de9cc85b 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -108,7 +108,7 @@ const (
 func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo {
 	var vi validationInfo
 	switch {
-	case fd.ContainingOneof() != nil:
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
 		switch fd.Kind() {
 		case pref.MessageKind:
 			vi.typ = validationTypeMessage
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
index 575c988ccb..009cbefd1e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go
@@ -5,9 +5,10 @@
 package impl
 
 import (
-	"reflect"
+	"fmt"
 
 	pref "google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
 // weakFields adds methods to the exported WeakFields type for internal use.
@@ -16,31 +17,58 @@ import (
 // defined directly on it.
 type weakFields WeakFields
 
-func (w *weakFields) get(num pref.FieldNumber) (_ pref.ProtoMessage, ok bool) {
-	if *w == nil {
-		return nil, false
-	}
-	m, ok := (*w)[int32(num)]
-	if !ok {
-		return nil, false
-	}
-	// As a legacy quirk, consider a typed nil to be unset.
-	//
-	// TODO: Consider fixing the generated set methods to clear the field
-	// when provided with a typed nil.
-	if v := reflect.ValueOf(m); v.Kind() == reflect.Ptr && v.IsNil() {
-		return nil, false
-	}
-	return Export{}.ProtoMessageV2Of(m), true
+func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) {
+	m, ok := w[int32(num)]
+	return m, ok
 }
 
 func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) {
 	if *w == nil {
 		*w = make(weakFields)
 	}
-	(*w)[int32(num)] = Export{}.ProtoMessageV1Of(m)
+	(*w)[int32(num)] = m
 }
 
 func (w *weakFields) clear(num pref.FieldNumber) {
 	delete(*w, int32(num))
 }
+
+func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool {
+	_, ok := w[int32(num)]
+	return ok
+}
+
+func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) {
+	delete(*w, int32(num))
+}
+
+func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage {
+	if m, ok := w[int32(num)]; ok {
+		return m
+	}
+	mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+	if mt == nil {
+		panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+	}
+	return mt.Zero().Interface()
+}
+
+func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) {
+	if m != nil {
+		mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
+		if mt == nil {
+			panic(fmt.Sprintf("message %v for weak field is not linked in", name))
+		}
+		if mt != m.ProtoReflect().Type() {
+			panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
+		}
+	}
+	if m == nil || !m.ProtoReflect().IsValid() {
+		delete(*w, int32(num))
+		return
+	}
+	if *w == nil {
+		*w = make(weakFields)
+	}
+	(*w)[int32(num)] = m
+}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index cd84284c9d..774719e4ff 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 //     10. Send out the CL for review and submit it.
 const (
 	Major = 1
-	Minor = 21
+	Minor = 22
 	Patch = 0
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go
index d7c99235ec..3e9a6a2f66 100644
--- a/vendor/google.golang.org/protobuf/proto/checkinit.go
+++ b/vendor/google.golang.org/protobuf/proto/checkinit.go
@@ -12,6 +12,12 @@ import (
 
 // CheckInitialized returns an error if any required fields in m are not set.
 func CheckInitialized(m Message) error {
+	// Treat a nil message interface as an "untyped" empty message,
+	// which we assume to have no required fields.
+	if m == nil {
+		return nil
+	}
+
 	return checkInitialized(m.ProtoReflect())
 }
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index fa738a15da..456bfda478 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -74,19 +74,54 @@ type MarshalOptions struct {
 
 // Marshal returns the wire-format encoding of m.
 func Marshal(m Message) ([]byte, error) {
+	// Treat nil message interface as an empty message; nothing to output.
+	if m == nil {
+		return nil, nil
+	}
+
 	out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect())
+	if len(out.Buf) == 0 && err == nil {
+		out.Buf = emptyBytesForMessage(m)
+	}
 	return out.Buf, err
 }
 
 // Marshal returns the wire-format encoding of m.
 func (o MarshalOptions) Marshal(m Message) ([]byte, error) {
+	// Treat nil message interface as an empty message; nothing to output.
+	if m == nil {
+		return nil, nil
+	}
+
 	out, err := o.marshal(nil, m.ProtoReflect())
+	if len(out.Buf) == 0 && err == nil {
+		out.Buf = emptyBytesForMessage(m)
+	}
 	return out.Buf, err
 }
 
+// emptyBytesForMessage returns a nil buffer if and only if m is invalid,
+// otherwise it returns a non-nil empty buffer.
+//
+// This is to assist the edge-case where user-code does the following:
+//	m1.OptionalBytes, _ = proto.Marshal(m2)
+// where they expect the proto2 "optional_bytes" field to be populated
+// if and only if m2 is a valid message.
+func emptyBytesForMessage(m Message) []byte {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return nil
+	}
+	return emptyBuf[:]
+}
+
 // MarshalAppend appends the wire-format encoding of m to b,
 // returning the result.
 func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
+	// Treat nil message interface as an empty message; nothing to append.
+	if m == nil {
+		return b, nil
+	}
+
 	out, err := o.marshal(b, m.ProtoReflect())
 	return out.Buf, err
 }
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 73f2431f1a..5f293cda86 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -9,28 +9,84 @@ import (
 )
 
 // HasExtension reports whether an extension field is populated.
-// It panics if ext does not extend m.
-func HasExtension(m Message, ext protoreflect.ExtensionType) bool {
-	return m.ProtoReflect().Has(ext.TypeDescriptor())
+// It returns false if m is invalid or if xt does not extend m.
+func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
+	// Treat nil message interface as an empty message; no populated fields.
+	if m == nil {
+		return false
+	}
+
+	// As a special-case, we report invalid or mismatching descriptors
+	// as always not being populated (since they aren't).
+	if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+		return false
+	}
+
+	return m.ProtoReflect().Has(xt.TypeDescriptor())
 }
 
 // ClearExtension clears an extension field such that subsequent
 // HasExtension calls return false.
-// It panics if ext does not extend m.
-func ClearExtension(m Message, ext protoreflect.ExtensionType) {
-	m.ProtoReflect().Clear(ext.TypeDescriptor())
+// It panics if m is invalid or if xt does not extend m.
+func ClearExtension(m Message, xt protoreflect.ExtensionType) {
+	m.ProtoReflect().Clear(xt.TypeDescriptor())
 }
 
 // GetExtension retrieves the value for an extension field.
 // If the field is unpopulated, it returns the default value for
-// scalars and an immutable, empty value for lists, maps, or messages.
-// It panics if ext does not extend m.
-func GetExtension(m Message, ext protoreflect.ExtensionType) interface{} {
-	return ext.InterfaceOf(m.ProtoReflect().Get(ext.TypeDescriptor()))
+// scalars and an immutable, empty value for lists or messages.
+// It panics if xt does not extend m.
+func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+	// Treat nil message interface as an empty message; return the default.
+	if m == nil {
+		return xt.InterfaceOf(xt.Zero())
+	}
+
+	return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor()))
 }
 
 // SetExtension stores the value of an extension field.
-// It panics if ext does not extend m or if value type is invalid for the field.
-func SetExtension(m Message, ext protoreflect.ExtensionType, value interface{}) {
-	m.ProtoReflect().Set(ext.TypeDescriptor(), ext.ValueOf(value))
+// It panics if m is invalid, xt does not extend m, or if the type of v
+// is invalid for the specified extension field.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+	xd := xt.TypeDescriptor()
+	pv := xt.ValueOf(v)
+
+	// Specially treat an invalid list, map, or message as clear.
+	isValid := true
+	switch {
+	case xd.IsList():
+		isValid = pv.List().IsValid()
+	case xd.IsMap():
+		isValid = pv.Map().IsValid()
+	case xd.Message() != nil:
+		isValid = pv.Message().IsValid()
+	}
+	if !isValid {
+		m.ProtoReflect().Clear(xd)
+		return
+	}
+
+	m.ProtoReflect().Set(xd, pv)
+}
+
+// RangeExtensions iterates over every populated extension field in m in an
+// undefined order, calling f for each extension type and value encountered.
+// It returns immediately if f returns false.
+// While iterating, mutating operations may only be performed
+// on the current extension field.
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+	// Treat nil message interface as an empty message; nothing to range over.
+	if m == nil {
+		return
+	}
+
+	m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if fd.IsExtension() {
+			xt := fd.(protoreflect.ExtensionTypeDescriptor).Type()
+			vi := xt.InterfaceOf(v)
+			return f(xt, vi)
+		}
+		return true
+	})
 }
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index df72f98952..27ab1a686e 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -21,6 +21,9 @@ import (
 // It is semantically equivalent to unmarshaling the encoded form of src
 // into dst with the UnmarshalOptions.Merge option specified.
 func Merge(dst, src Message) {
+	// TODO: Should nil src be treated as semantically equivalent to an
+	// untyped, read-only, empty message? What about a nil dst?
+
 	dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
 	if dstMsg.Descriptor() != srcMsg.Descriptor() {
 		panic("descriptor mismatch")
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index a4e72bd9c0..11ba841464 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -18,6 +18,11 @@ func Size(m Message) int {
 
 // Size returns the size in bytes of the wire-format encoding of m.
 func (o MarshalOptions) Size(m Message) int {
+	// Treat a nil message interface as an empty message; nothing to output.
+	if m == nil {
+		return 0
+	}
+
 	return sizeMessage(m.ProtoReflect())
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 1b89bedabd..b669a4e761 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -22,8 +22,9 @@
 //
 // The protobuf descriptor interfaces are not meant to be implemented by
 // user code since they might need to be extended in the future to support
-// additions to the protobuf language. Protobuf descriptors can be constructed
-// using the "google.golang.org/protobuf/reflect/protodesc" package.
+// additions to the protobuf language.
+// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// google.protobuf.DescriptorProto messages and protobuf descriptors.
 //
 //
 // Go Type Descriptors
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 41d560e738..5be14a7258 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -281,11 +281,19 @@ type FieldDescriptor interface {
 	// It is usually the camel-cased form of the field name.
 	JSONName() string
 
+	// HasPresence reports whether the field distinguishes between unpopulated
+	// and default values.
+	HasPresence() bool
+
 	// IsExtension reports whether this is an extension field. If false,
 	// then Parent and ContainingMessage refer to the same message.
 	// Otherwise, ContainingMessage and Parent likely differ.
 	IsExtension() bool
 
+	// HasOptionalKeyword reports whether the "optional" keyword was explicitly
+	// specified in the source .proto file.
+	HasOptionalKeyword() bool
+
 	// IsWeak reports whether this is a weak field, which does not impose a
 	// direct dependency on the target type.
 	// If true, then Message returns a placeholder type.
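The two FieldDescriptor additions in the hunk above (HasPresence, HasOptionalKeyword) can be probed from user code via protoreflect; the OneofDescriptor.IsSynthetic hunk follows next. A minimal sketch, assuming the vendored google.golang.org/protobuf v1.22.0 is on the import path; durationpb is used only as a convenient, always-registered message type:

```go
// Sketch only: prints the new presence-related descriptor properties
// for every field of a well-known message.
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fds := md.Fields()
	for i := 0; i < fds.Len(); i++ {
		fd := fds.Get(i)
		// Plain proto3 scalars (like Duration's seconds/nanos) report
		// false/false; proto2 fields, message fields, and proto3
		// `optional` fields (synthetic oneofs) report HasPresence=true.
		fmt.Printf("%s: HasPresence=%v HasOptionalKeyword=%v\n",
			fd.Name(), fd.HasPresence(), fd.HasOptionalKeyword())
	}
}
```

This is also why the vendored impl code above switched from `fd.Syntax() == pref.Proto2` to `fd.HasPresence()`: presence is now a per-field property rather than a per-file syntax property.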
@@ -375,6 +383,11 @@ type FieldDescriptors interface {
 type OneofDescriptor interface {
 	Descriptor
 
+	// IsSynthetic reports whether this is a synthetic oneof created to support
+	// proto3 optional semantics. If true, Fields contains exactly one field
+	// with HasOptionalKeyword specified.
+	IsSynthetic() bool
+
 	// Fields is a list of fields belonging to this oneof.
 	Fields() FieldDescriptors
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a352ed9e6c..f319810778 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -114,8 +114,8 @@ type Message interface {
 	// Mutable is a mutating operation and unsafe for concurrent use.
 	Mutable(FieldDescriptor) Value
 
-	// NewField returns a new value for assignable to the field of a given descriptor.
-	// For scalars, this returns the default value.
+	// NewField returns a new value that is assignable to the field
+	// for the given descriptor. For scalars, this returns the default value.
 	// For lists, maps, and messages, this returns a new, empty, mutable value.
 	NewField(FieldDescriptor) Value
 
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4b02d0798a..8a39d1e8c9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -59,7 +59,7 @@ github.com/PuerkitoBio/goquery
 github.com/PuerkitoBio/purell
 # github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
 github.com/PuerkitoBio/urlesc
-# github.com/RoaringBitmap/roaring v0.4.21
+# github.com/RoaringBitmap/roaring v0.4.23
 ## explicit
 github.com/RoaringBitmap/roaring
 # github.com/andybalholm/cascadia v1.0.0
@@ -74,7 +74,7 @@ github.com/aymerick/douceur/css
 github.com/beorn7/perks/quantile
 # github.com/bgentry/speakeasy v0.1.0
 ## explicit
-# github.com/blevesearch/bleve v0.8.1
+# github.com/blevesearch/bleve v1.0.7
 ## explicit
 github.com/blevesearch/bleve
 github.com/blevesearch/bleve/analysis
@@ -96,7 +96,6 @@ github.com/blevesearch/bleve/index
 github.com/blevesearch/bleve/index/scorch
 github.com/blevesearch/bleve/index/scorch/mergeplan
 github.com/blevesearch/bleve/index/scorch/segment
-github.com/blevesearch/bleve/index/scorch/segment/zap
 github.com/blevesearch/bleve/index/store
 github.com/blevesearch/bleve/index/store/boltdb
 github.com/blevesearch/bleve/index/store/gtreap
@@ -116,14 +115,19 @@ github.com/blevesearch/bleve/search/query
 github.com/blevesearch/bleve/search/scorer
 github.com/blevesearch/bleve/search/searcher
 github.com/blevesearch/bleve/size
-# github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3
-## explicit
-# github.com/blevesearch/go-porterstemmer v1.0.2
-## explicit
+# github.com/blevesearch/go-porterstemmer v1.0.3
 github.com/blevesearch/go-porterstemmer
-# github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f
-## explicit
+# github.com/blevesearch/mmap-go v1.0.2
+github.com/blevesearch/mmap-go
+# github.com/blevesearch/segment v0.9.0
 github.com/blevesearch/segment
+# github.com/blevesearch/snowballstem v0.9.0
+github.com/blevesearch/snowballstem
+github.com/blevesearch/snowballstem/english
+# github.com/blevesearch/zap/v11 v11.0.7
+github.com/blevesearch/zap/v11
+# github.com/blevesearch/zap/v12 v12.0.7
+github.com/blevesearch/zap/v12
 # github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26
 ## explicit
 github.com/boombuler/barcode
@@ -140,8 +144,7 @@ github.com/couchbase/gomemcached/client
 # github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85
 github.com/couchbase/goutils/logging
 github.com/couchbase/goutils/scramsha
-# github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd
-## explicit
+# github.com/couchbase/vellum v1.0.1
 github.com/couchbase/vellum
 github.com/couchbase/vellum/levenshtein
 github.com/couchbase/vellum/regexp
@@ -171,8 +174,6 @@ github.com/dustin/go-humanize
 # github.com/editorconfig/editorconfig-core-go/v2 v2.1.1
 ## explicit
 github.com/editorconfig/editorconfig-core-go/v2
-# github.com/edsrzf/mmap-go v1.0.0
-github.com/edsrzf/mmap-go
 # github.com/emirpasic/gods v1.12.0
 ## explicit
 github.com/emirpasic/gods/containers
@@ -181,9 +182,6 @@ github.com/emirpasic/gods/lists/arraylist
 github.com/emirpasic/gods/trees
 github.com/emirpasic/gods/trees/binaryheap
 github.com/emirpasic/gods/utils
-# github.com/etcd-io/bbolt v1.3.3
-## explicit
-github.com/etcd-io/bbolt
 # github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a
 ## explicit
 github.com/ethantkoenig/rupture
@@ -345,7 +343,7 @@ github.com/gogs/chardet
 github.com/gogs/cron
 # github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe
 github.com/golang-sql/civil
-# github.com/golang/protobuf v1.4.0
+# github.com/golang/protobuf v1.4.1
 ## explicit
 github.com/golang/protobuf/proto
 # github.com/golang/snappy v0.0.1
@@ -510,7 +508,7 @@ github.com/mitchellh/go-homedir
 github.com/mitchellh/mapstructure
 # github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c
 github.com/mrjones/oauth
-# github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae
+# github.com/mschoch/smat v0.2.0
 github.com/mschoch/smat
 # github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
 ## explicit
@@ -593,8 +591,7 @@ github.com/spf13/jwalterweatherman
 github.com/spf13/pflag
 # github.com/spf13/viper v1.4.0
 github.com/spf13/viper
-# github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2
-## explicit
+# github.com/steveyen/gtreap v0.1.0
 github.com/steveyen/gtreap
 # github.com/stretchr/testify v1.4.0
 ## explicit
@@ -615,7 +612,7 @@ github.com/syndtr/goleveldb/leveldb/table
 github.com/syndtr/goleveldb/leveldb/util
 # github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481
 ## explicit
-# github.com/tinylib/msgp v1.1.1
+# github.com/tinylib/msgp v1.1.2
 ## explicit
 github.com/tinylib/msgp/msgp
 # github.com/toqueteos/trie v1.0.0
@@ -665,8 +662,8 @@ github.com/yuin/goldmark/util
 # github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
 ## explicit
 github.com/yuin/goldmark-meta
-# go.etcd.io/bbolt v1.3.3
-## explicit
+# go.etcd.io/bbolt v1.3.4
+go.etcd.io/bbolt
 # go.mongodb.org/mongo-driver v1.1.1
 go.mongodb.org/mongo-driver/bson
 go.mongodb.org/mongo-driver/bson/bsoncodec
@@ -722,9 +719,10 @@ golang.org/x/oauth2/google
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
+# golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
 ## explicit
 golang.org/x/sys/cpu
+golang.org/x/sys/internal/unsafeheader
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/svc
@@ -793,7 +791,7 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/protobuf v1.21.0
+# google.golang.org/protobuf v1.22.0
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
 google.golang.org/protobuf/internal/descfmt