From d8975fb82a0c9ad5125ec39a50546d3bacd3e83f Mon Sep 17 00:00:00 2001 From: Todd Short Date: Wed, 26 Feb 2025 22:37:51 -0500 Subject: [PATCH] Update operator-registry to v1.51.0 (#3525) Signed-off-by: Todd Short --- go.mod | 40 +- go.sum | 142 ++--- .../docker/cli/cli/config/config.go | 2 +- .../cli/cli/config/credentials/file_store.go | 36 +- .../grpc-gateway/v2/runtime/convert.go | 6 +- .../grpc-gateway/v2/runtime/errors.go | 15 + .../grpc-gateway/v2/runtime/fieldmask.go | 2 +- .../v2/runtime/marshaler_registry.go | 4 +- .../grpc-gateway/v2/runtime/proto2_convert.go | 4 +- .../grpc-gateway/v2/utilities/pattern.go | 2 +- .../v2/utilities/string_array_flag.go | 2 +- .../operator-registry/alpha/model/error.go | 7 +- .../operator-registry/alpha/model/model.go | 1 + .../alpha/property/property.go | 4 +- .../operator-registry/pkg/api/api_to_model.go | 1 + .../operator-registry/pkg/api/model_to_api.go | 21 +- .../operator-registry/pkg/client/client.go | 5 +- .../operator-registry/pkg/client/errors.go | 1 + .../pkg/client/kubeclient.go | 10 +- .../pkg/configmap/configmap.go | 17 +- .../pkg/configmap/configmap_writer.go | 2 +- .../pkg/containertools/containertool.go | 15 +- .../pkg/containertools/dockerfilegenerator.go | 8 +- .../pkg/containertools/labelreader.go | 1 + .../pkg/containertools/runner.go | 19 +- .../pkg/image/containerdregistry/options.go | 21 +- .../pkg/image/containerdregistry/registry.go | 38 +- .../pkg/image/execregistry/registry.go | 2 +- .../operator-registry/pkg/image/mock.go | 2 +- .../operator-registry/pkg/lib/bundle/build.go | 1 + .../pkg/lib/bundle/errors.go | 1 + .../pkg/lib/bundle/exporter.go | 1 - .../pkg/lib/bundle/generate.go | 12 +- .../pkg/lib/bundle/interpreter.go | 7 +- .../pkg/lib/bundle/validate.go | 18 +- .../pkg/lib/semver/semver.go | 2 + .../pkg/lib/validation/bundle.go | 10 +- .../prettyunmarshaler/prettyunmarshaler.go | 15 +- .../operator-registry/pkg/registry/bundle.go | 21 +- .../pkg/registry/bundlegraphloader.go | 5 +- .../pkg/registry/channelupdateoptions.go | 1 + .../operator-registry/pkg/registry/csv.go | 60 ++- .../operator-registry/pkg/registry/decode.go | 22 +- .../pkg/registry/directoryGraphLoader.go | 6 +- .../operator-registry/pkg/registry/empty.go | 13 +- .../operator-registry/pkg/registry/graph.go | 2 +- .../pkg/registry/imageinput.go | 3 +- .../operator-registry/pkg/registry/parse.go | 6 +- .../pkg/registry/populator.go | 4 + .../pkg/registry/registry_to_model.go | 3 +- .../operator-registry/pkg/registry/types.go | 9 + .../operator-registry/pkg/sqlite/configmap.go | 16 +- .../pkg/sqlite/conversion.go | 4 +- .../pkg/sqlite/db_options.go | 2 + .../operator-registry/pkg/sqlite/deprecate.go | 1 + .../pkg/sqlite/deprecationmessage.go | 2 +- .../operator-registry/pkg/sqlite/directory.go | 9 +- .../operator-registry/pkg/sqlite/load.go | 61 ++- .../operator-registry/pkg/sqlite/loadprocs.go | 4 +- .../sqlite/migrations/001_related_images.go | 14 +- .../sqlite/migrations/003_required_apis.go | 40 +- .../migrations/005_version_skiprange.go | 4 +- .../006_associate_apis_with_bundle.go | 4 +- .../sqlite/migrations/007_replaces_skips.go | 14 +- .../pkg/sqlite/migrations/009_properties.go | 5 +- .../010_set_bundlepath_pkg_property.go | 8 +- .../pkg/sqlite/migrations/012_deprecated.go | 1 + .../pkg/sqlite/migrations/migrations.go | 1 + .../operator-registry/pkg/sqlite/migrator.go | 16 +- .../operator-registry/pkg/sqlite/query.go | 91 ++-- vendor/go.etcd.io/bbolt/.gitignore | 2 + vendor/go.etcd.io/bbolt/.go-version | 2 +- vendor/go.etcd.io/bbolt/Makefile | 
62 ++- vendor/go.etcd.io/bbolt/OWNERS | 10 + vendor/go.etcd.io/bbolt/README.md | 68 ++- .../bbolt/{bolt_unix_aix.go => bolt_aix.go} | 1 - vendor/go.etcd.io/bbolt/bolt_android.go | 90 ++++ vendor/go.etcd.io/bbolt/bolt_arm64.go | 1 - vendor/go.etcd.io/bbolt/bolt_loong64.go | 1 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 1 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 1 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 1 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 1 - .../{bolt_unix_solaris.go => bolt_solaris.go} | 0 vendor/go.etcd.io/bbolt/bolt_unix.go | 7 +- vendor/go.etcd.io/bbolt/bolt_windows.go | 8 +- vendor/go.etcd.io/bbolt/boltsync_unix.go | 1 - vendor/go.etcd.io/bbolt/bucket.go | 484 +++++++++++++----- vendor/go.etcd.io/bbolt/cursor.go | 99 ++-- vendor/go.etcd.io/bbolt/db.go | 451 ++++++++-------- vendor/go.etcd.io/bbolt/errors.go | 76 ++- vendor/go.etcd.io/bbolt/errors/errors.go | 84 +++ vendor/go.etcd.io/bbolt/freelist.go | 410 --------------- vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 ------- .../bbolt/internal/common/bucket.go | 54 ++ .../go.etcd.io/bbolt/internal/common/inode.go | 115 +++++ .../go.etcd.io/bbolt/internal/common/meta.go | 161 ++++++ .../go.etcd.io/bbolt/internal/common/page.go | 391 ++++++++++++++ .../go.etcd.io/bbolt/internal/common/types.go | 40 ++ .../bbolt/{ => internal/common}/unsafe.go | 10 +- .../go.etcd.io/bbolt/internal/common/utils.go | 64 +++ .../bbolt/internal/common/verify.go | 67 +++ .../bbolt/internal/freelist/array.go | 108 ++++ .../bbolt/internal/freelist/freelist.go | 82 +++ .../bbolt/internal/freelist/hashmap.go | 292 +++++++++++ .../bbolt/internal/freelist/shared.go | 310 +++++++++++ vendor/go.etcd.io/bbolt/logger.go | 113 ++++ vendor/go.etcd.io/bbolt/mlock_unix.go | 1 - vendor/go.etcd.io/bbolt/node.go | 252 ++++----- vendor/go.etcd.io/bbolt/page.go | 212 -------- vendor/go.etcd.io/bbolt/tx.go | 234 +++++---- vendor/go.etcd.io/bbolt/tx_check.go | 206 +++++--- .../net/http/otelhttp/common.go | 7 - .../net/http/otelhttp/handler.go | 31 +- .../internal/request/resp_writer_wrapper.go | 11 +- .../net/http/otelhttp/internal/semconv/env.go | 80 ++- .../http/otelhttp/internal/semconv/v1.20.0.go | 104 +++- .../net/http/otelhttp/start_time_context.go | 29 ++ .../net/http/otelhttp/transport.go | 58 +-- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.golangci.yml | 2 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 49 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 18 +- vendor/go.opentelemetry.io/otel/Makefile | 2 +- .../tracetransform/instrumentation.go | 5 +- .../otlptrace/internal/tracetransform/span.go | 8 +- .../otlp/otlptrace/otlptracegrpc/client.go | 7 +- .../internal/otlpconfig/options.go | 6 +- .../otel/exporters/otlp/otlptrace/version.go | 2 +- .../otel/internal/global/instruments.go | 14 +- .../otel/internal/global/meter.go | 69 ++- .../otel/internal/global/trace.go | 8 +- .../otel/sdk/instrumentation/scope.go | 4 + .../otel/sdk/resource/auto.go | 62 +-- .../otel/sdk/resource/builtin.go | 6 +- .../otel/sdk/resource/host_id_windows.go | 7 +- .../otel/sdk/resource/os_windows.go | 1 - .../otel/sdk/trace/batch_span_processor.go | 7 +- .../otel/sdk/trace/evictedqueue.go | 21 +- .../otel/sdk/trace/provider.go | 9 +- .../otel/sdk/trace/span.go | 108 ++-- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- 
vendor/go.opentelemetry.io/otel/versions.yaml | 15 +- vendor/golang.org/x/mod/modfile/read.go | 5 + .../protobuf/encoding/protojson/decode.go | 5 - .../protobuf/encoding/prototext/decode.go | 5 - .../protobuf/internal/encoding/tag/tag.go | 8 +- .../protobuf/internal/filedesc/desc.go | 9 +- .../protobuf/internal/filedesc/desc_lazy.go | 9 - .../protobuf/internal/filetype/build.go | 2 +- .../protobuf/internal/flags/flags.go | 2 +- .../protobuf/internal/genid/goname.go | 5 - .../protobuf/internal/impl/codec_field.go | 75 --- .../protobuf/internal/impl/codec_map.go | 14 +- .../protobuf/internal/impl/codec_map_go111.go | 38 -- .../protobuf/internal/impl/codec_map_go112.go | 12 - .../protobuf/internal/impl/codec_message.go | 7 +- .../internal/impl/codec_message_opaque.go | 9 +- .../protobuf/internal/impl/convert_map.go | 2 +- .../protobuf/internal/impl/lazy.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 5 +- .../protobuf/internal/impl/message.go | 23 +- .../protobuf/internal/impl/message_opaque.go | 17 +- .../protobuf/internal/impl/message_reflect.go | 5 - .../internal/impl/message_reflect_field.go | 130 +---- .../protobuf/internal/impl/pointer_unsafe.go | 3 +- .../protobuf/internal/impl/validate.go | 24 +- .../protobuf/internal/impl/weak.go | 74 --- .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 5 - .../protobuf/reflect/protodesc/desc.go | 8 +- .../protobuf/reflect/protodesc/desc_init.go | 1 - .../reflect/protodesc/desc_resolve.go | 26 +- .../reflect/protodesc/desc_validate.go | 12 - .../protobuf/reflect/protodesc/editions.go | 48 +- .../protobuf/reflect/protodesc/proto.go | 3 - .../protobuf/reflect/protoreflect/type.go | 12 +- .../types/descriptorpb/descriptor.pb.go | 12 +- .../protobuf/types/dynamicpb/types.go | 12 +- .../types/gofeaturespb/go_features.pb.go | 12 +- .../protobuf/types/known/anypb/any.pb.go | 12 +- .../types/known/durationpb/duration.pb.go | 12 +- .../protobuf/types/known/emptypb/empty.pb.go | 12 +- .../types/known/fieldmaskpb/field_mask.pb.go | 12 +- .../types/known/structpb/struct.pb.go | 12 +- .../types/known/timestamppb/timestamp.pb.go | 12 +- .../types/known/wrapperspb/wrappers.pb.go | 12 +- vendor/modules.txt | 55 +- 193 files changed, 4416 insertions(+), 2833 deletions(-) create mode 100644 vendor/go.etcd.io/bbolt/OWNERS rename vendor/go.etcd.io/bbolt/{bolt_unix_aix.go => bolt_aix.go} (99%) create mode 100644 vendor/go.etcd.io/bbolt/bolt_android.go rename vendor/go.etcd.io/bbolt/{bolt_unix_solaris.go => bolt_solaris.go} (100%) create mode 100644 vendor/go.etcd.io/bbolt/errors/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bucket.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/inode.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/meta.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/page.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/types.go rename vendor/go.etcd.io/bbolt/{ => internal/common}/unsafe.go (74%) create mode 100644 vendor/go.etcd.io/bbolt/internal/common/utils.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/verify.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/array.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/freelist.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/shared.go create mode 
100644 vendor/go.etcd.io/bbolt/logger.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go diff --git a/go.mod b/go.mod index 8af1e6fb84..bbd1a86664 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/openshift/api v3.9.0+incompatible github.com/openshift/client-go v0.0.0-20220525160904-9e1acff93e4a github.com/operator-framework/api v0.29.0 - github.com/operator-framework/operator-registry v1.50.0 + github.com/operator-framework/operator-registry v1.51.0 github.com/otiai10/copy v1.14.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.21.0 @@ -38,7 +38,7 @@ require ( golang.org/x/net v0.35.0 golang.org/x/sync v0.11.0 golang.org/x/time v0.10.0 - google.golang.org/grpc v1.69.4 + google.golang.org/grpc v1.70.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.32.2 k8s.io/apiextensions-apiserver v0.32.2 @@ -56,7 +56,7 @@ require ( ) require ( - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -71,19 +71,19 @@ require ( github.com/containerd/containerd v1.7.25 // indirect github.com/containerd/containerd/api v1.8.0 // indirect github.com/containerd/continuity v0.4.4 // indirect - github.com/containerd/errdefs v0.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v0.2.1 // indirect github.com/containerd/ttrpc v1.2.5 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect - github.com/containers/common v0.61.0 // indirect + github.com/containers/common v0.62.0 // indirect github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect github.com/containers/ocicrypt v1.2.1 // indirect github.com/containers/storage v1.57.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/docker/cli v27.5.1+incompatible // indirect + github.com/docker/cli v28.0.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker v27.5.1+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect @@ -102,7 +102,7 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-migrate/migrate/v4 v4.18.1 // indirect + github.com/golang-migrate/migrate/v4 v4.18.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect @@ -113,7 +113,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect github.com/h2non/filetype v1.1.3 // indirect 
github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -143,25 +143,25 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/bbolt v1.3.11 // indirect + go.etcd.io/bbolt v1.4.0 // indirect go.etcd.io/etcd/api/v3 v3.5.16 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.16 // indirect go.etcd.io/etcd/client/v3 v3.5.16 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 // indirect - go.opentelemetry.io/otel/metric v1.31.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect + go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.32.0 // indirect + go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/trace v1.32.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.33.0 // indirect - golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 // indirect - golang.org/x/mod v0.22.0 // indirect + golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect + golang.org/x/mod v0.23.0 // indirect golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect @@ -169,9 +169,9 @@ require ( golang.org/x/tools v0.29.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect - google.golang.org/protobuf v1.36.2 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/go.sum b/go.sum index 871d5a9625..0d1293c700 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.0 h1:lXuo+nDhpyJSpWxpPVi5cPUwzKb+dsdOiw6IreM5yt0= +cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= @@ -1391,8 +1391,8 @@ github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVM github.com/containerd/containerd/api v1.8.0/go.mod 
h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= -github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4= -github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= @@ -1403,8 +1403,8 @@ github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oL github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= -github.com/containers/common v0.61.0 h1:j/84PTqZIKKYy42OEJsZmjZ4g4Kq2ERuC3tqp2yWdh4= -github.com/containers/common v0.61.0/go.mod h1:NGRISq2vTFPSbhNqj6MLwyes4tWSlCnqbJg7R77B8xc= +github.com/containers/common v0.62.0 h1:Sl9WE5h7Y/F3bejrMAA4teP1EcY9ygqJmW4iwSloZ10= +github.com/containers/common v0.62.0/go.mod h1:Yec+z8mrSq4rydHofrnDCBqAcNA/BGrSg1kfFUL6F6s= github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI= github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo= github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= @@ -1425,12 +1425,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/distribution/distribution/v3 v3.0.0-rc.1 h1:6M4ewmPBUhF7wtQ8URLOQ1W/PQuVKiD1u8ymwLDUGqQ= -github.com/distribution/distribution/v3 v3.0.0-rc.1/go.mod h1:tFjaPDeHCrLg28e4feBIy27cP+qmrc/mvkl6MFIfVi4= +github.com/distribution/distribution/v3 v3.0.0-rc.3 h1:JRJso9IVLoooKX76oWR+DWCCdZlK5m4nRtDWvzB1ITg= +github.com/distribution/distribution/v3 v3.0.0-rc.3/go.mod h1:offoOgrnYs+CFwis8nE0hyzYZqRCZj5EFc5kgfszwiE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY= -github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.0.0+incompatible h1:ido37VmLUqEp+5NFb9icd6BuBB+SNDgCn+5kPCr2buA= +github.com/docker/cli v28.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= @@ -1536,10 +1536,10 @@ github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFT github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang-migrate/migrate/v4 v4.18.1 h1:JML/k+t4tpHCpQTCAD62Nu43NUFzHY4CV3uAuvHGC+Y= -github.com/golang-migrate/migrate/v4 v4.18.1/go.mod h1:HAX6m3sQgcdO81tdjn5exv20+3Kb13cmGli1hrD6hks= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-migrate/migrate/v4 v4.18.2 h1:2VSCMz7x7mjyTXx3m2zPokOY82LTRgxK1yQYKo6wWQ8= +github.com/golang-migrate/migrate/v4 v4.18.2/go.mod h1:2CM6tJvn2kqPXwnXO/d3rAQYiyoIm180VsO8PRX6Rpk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= @@ -1689,8 +1689,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/h2non/go-is-svg v0.0.0-20160927212452-35e8c4b0612c h1:fEE5/5VNnYUoBOj2I9TP8Jc+a7lge3QWn9DKE7NCwfc= @@ -1829,8 +1829,8 @@ github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= github.com/operator-framework/api v0.29.0 h1:TxAR8RCO+I4FjRrY4PSMgnlmbxNWeD8pzHXp7xwHNmw= github.com/operator-framework/api v0.29.0/go.mod h1:0whQE4mpMDd2zyHkQe+bFa3DLoRs6oGWCbu8dY/3pyc= -github.com/operator-framework/operator-registry v1.50.0 h1:kMAwsKAEDjuSx5dGchMX+CD3SMHWwOAC/xyK3LQweB4= -github.com/operator-framework/operator-registry v1.50.0/go.mod h1:713Z/XzA5jViFMGIsXmfAcpA6h5uUKqUl3fO1t4taa0= +github.com/operator-framework/operator-registry v1.51.0 h1:3T1H2W0wYvJx82x+Ue6nooFsn859ceJf1yH6MdRdjMQ= +github.com/operator-framework/operator-registry v1.51.0/go.mod h1:dJadFTSvsgpeiqhTMK7+zXrhU0LIlx4Y/aDz0efq5oQ= github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= 
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= @@ -1876,8 +1876,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= @@ -1936,8 +1936,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.etcd.io/etcd/api/v3 v3.5.16 h1:WvmyJVbjWqK4R1E+B12RRHz3bRGy9XVfh++MgbN+6n0= go.etcd.io/etcd/api/v3 v3.5.16/go.mod h1:1P4SlIP/VwkDmGo3OlOD7faPeP8KDIFhqvciH5EfN28= go.etcd.io/etcd/client/pkg/v3 v3.5.16 h1:ZgY48uH6UvB+/7R9Yf4x574uCO3jIx0TRDyetSfId3Q= @@ -1961,63 +1961,65 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/bridges/prometheus v0.54.0 h1:WWL67oxtknNVMb70lJXxXruf8UyK/a9hmIE1XO3Uedg= -go.opentelemetry.io/contrib/bridges/prometheus v0.54.0/go.mod h1:LqNcnXmyULp8ertk4hUTVtSUvKXj4h1Mx7gUCSSr/q0= -go.opentelemetry.io/contrib/exporters/autoexport v0.54.0 h1:dTmcmVm4J54IRPGm5oVjLci1uYat4UDea84E2tyBaAk= -go.opentelemetry.io/contrib/exporters/autoexport v0.54.0/go.mod h1:zPp5Fwpq2Hc7xMtVttg6GhZMcfTESjVbY9ONw2o/Dc4= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w= +go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4= +go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94= go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= -go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= -go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0 h1:4d++HQ+Ihdl+53zSjtsCUFDmNMju2FC9qFkUlTxPLqo= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.5.0/go.mod h1:mQX5dTO3Mh5ZF7bPKDkt5c/7C41u/SiDr9XgTpzXXn8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0 h1:xvhQxJ/C9+RTnAj5DpTg7LSM1vbbMTiXt7e9hsfqHNw= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.29.0/go.mod h1:Fcvs2Bz1jkDM+Wf5/ozBGmi3tQ/c9zPKLnsipnfhGAo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc= -go.opentelemetry.io/otel/exporters/prometheus v0.51.0 h1:G7uexXb/K3T+T9fNLCCKncweEtNEBMTO+46hKX5EdKw= -go.opentelemetry.io/otel/exporters/prometheus v0.51.0/go.mod h1:v0mFe5Kk7woIh938mrZBJBmENYquyA0IICrlYm4Y0t4= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.5.0 h1:ThVXnEsdwNcxdBO+r96ci1xbF+PgNjwlk457VNuJODo= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.5.0/go.mod h1:rHWcSmC4q2h3gje/yOq6sAOaq8+UHxN/Ru3BbmDXOfY= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 
h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0 h1:X3ZjNp36/WlkSYx0ul2jw4PtbNEDDeLskw3VPsrpYM0= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.29.0/go.mod h1:2uL/xnOXh0CHOBFCWXz5u1A4GXLiW+0IQIzVbeOEQ0U= -go.opentelemetry.io/otel/log v0.5.0 h1:x1Pr6Y3gnXgl1iFBwtGy1W/mnzENoK0w0ZoaeOI3i30= -go.opentelemetry.io/otel/log v0.5.0/go.mod h1:NU/ozXeGuOR5/mjCRXYbTC00NFJ3NYuraV/7O78F0rE= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0/go.mod h1:5KXybFvPGds3QinJWQT7pmXf+TN5YIa7CNYObWRkj50= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 h1:IJFEoHiytixx8cMiVAO+GmHR6Frwu+u5Ur8njpFO6Ac= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0/go.mod h1:3rHrKNtLIoS0oZwkY2vxi+oJcwFRWdtUyRII+so45p8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 h1:9kV11HXBHZAvuPUZxmMWrH8hZn/6UnHX4K0mu36vNsU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0/go.mod h1:JyA0FHXe22E1NeNiHmVp7kFHglnexDQ7uRWDiiJ1hKQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0 h1:cMyu9O88joYEaI47CnQkxO1XZdpoTF9fEnW2duIddhw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.32.0/go.mod h1:6Am3rn7P9TVVeXYG+wtcGE7IE1tsQ+bP3AuWcKt/gOI= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= +go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0 h1:CHXNXwfKWfzS65yrlB2PVds1IBZcdsX8Vepy9of0iRU= +go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.8.0/go.mod h1:zKU4zUgKiaRxrdovSS2amdM5gOc59slmo/zJwGX+YBg= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsux7Qmq8ToKAx1XCilTQECZ0KDZyTw= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s= +go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= +go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= go.opentelemetry.io/otel/metric 
v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= -go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= -go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/sdk/log v0.5.0 h1:A+9lSjlZGxkQOr7QSBJcuyyYBw79CufQ69saiJLey7o= -go.opentelemetry.io/otel/sdk/log v0.5.0/go.mod h1:zjxIW7sw1IHolZL2KlSAtrUi8JHttoeiQy43Yl3WuVQ= -go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= -go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= +go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs= +go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo= +go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= +go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= -go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= -go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -2074,8 +2076,8 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMk golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= -golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 
h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588= +golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -2122,8 +2124,8 @@ golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2744,8 +2746,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go. google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= google.golang.org/genproto/googleapis/api v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:PVreiBMirk8ypES6aw9d4p6iiBNSIfZEBqr3UGoAi2E= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= -google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E= +google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= google.golang.org/genproto/googleapis/bytestream v0.0.0-20231030173426-d783a09b4405/go.mod h1:GRUCuLdzVqZte8+Dl/D4N25yLzcGqqWaYkeVOwulFqw= @@ -2804,8 +2806,8 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= -google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf 
v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go index 5a51843260..910b3c0064 100644 --- a/vendor/github.com/docker/cli/cli/config/config.go +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -143,7 +143,7 @@ func load(configDir string) (*configfile.ConfigFile, error) { defer file.Close() err = configFile.LoadFromReader(file) if err != nil { - err = errors.Wrapf(err, "loading config file: %s: ", filename) + err = errors.Wrapf(err, "parsing config file (%s)", filename) } return configFile, err } diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go index 9540628150..c69312b014 100644 --- a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -1,9 +1,12 @@ package credentials import ( + "fmt" "net" "net/url" + "os" "strings" + "sync/atomic" "github.com/docker/cli/cli/config/types" ) @@ -57,6 +60,21 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { return c.file.GetAuthConfigs(), nil } +// unencryptedWarning warns the user when using an insecure credential storage. +// After a deprecation period, user will get prompted if stdin and stderr are a terminal. +// Otherwise, we'll assume they want it (sadly), because people may have been scripting +// insecure logins and we don't want to break them. Maybe they'll see the warning in their +// logs and fix things. +const unencryptedWarning = ` +WARNING! Your credentials are stored unencrypted in '%s'. +Configure a credential helper to remove this warning. See +https://docs.docker.com/go/credential-store/ +` + +// alreadyPrinted ensures that we only print the unencryptedWarning once per +// CLI invocation (no need to warn the user multiple times per command). +var alreadyPrinted atomic.Bool + // Store saves the given credentials in the file store. This function is // idempotent and does not update the file if credentials did not change. func (c *fileStore) Store(authConfig types.AuthConfig) error { @@ -66,15 +84,19 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error { return nil } authConfigs[authConfig.ServerAddress] = authConfig - return c.file.Save() -} + if err := c.file.Save(); err != nil { + return err + } -func (c *fileStore) GetFilename() string { - return c.file.GetFilename() -} + if !alreadyPrinted.Load() && authConfig.Password != "" { + // Display a warning if we're storing the users password (not a token). 
+ // + // FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr + _, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename())) + alreadyPrinted.Store(true) + } -func (c *fileStore) IsFileStore() bool { - return true + return nil } // ConvertToHostname converts a registry url which has http|https prepended diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb3..2e50082ad1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 01f5734191..41cd4f5030 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. 
This is diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0bf..2fcd7af3c4 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e89..07c28112c8 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f20..f710036b35 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de4864..38ca39cc53 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. +// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. 
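The docker/cli file_store.go hunk earlier in this patch adds a once-per-process warning when Store() saves a plain-text password, gated by a package-level atomic.Bool. Below is a minimal, self-contained sketch of that print-once pattern (Go 1.19+ for sync/atomic.Bool); warnOnce and storeCredential are illustrative names, not part of the vendored code, and CompareAndSwap is used here to collapse the vendored Load/Store pair into a single race-free check-and-set:

```go
package main

import (
	"fmt"
	"os"
	"sync/atomic"
)

// warnOnce plays the role of the vendored alreadyPrinted atomic.Bool: it
// gates the warning so one process emits it at most once.
var warnOnce atomic.Bool

const unencryptedWarning = "WARNING! Your credentials are stored unencrypted in '%s'.\n"

// storeCredential is an illustrative stand-in for fileStore.Store: persist
// the credential, then warn only the first time a plain-text password (as
// opposed to a token) is saved.
func storeCredential(filename, password string) {
	// ... persist the credential to filename here ...
	if password != "" && warnOnce.CompareAndSwap(false, true) {
		fmt.Fprintf(os.Stderr, unencryptedWarning, filename)
	}
}

func main() {
	storeCredential("/home/me/.docker/config.json", "s3cret") // warns once
	storeCredential("/home/me/.docker/config.json", "s3cret") // silent
}
```

This guard is why a command that stores several credentials in one invocation surfaces the warning only once rather than spamming stderr.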
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776c..66aa5f2dcc 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go b/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go index 0ad0f7adba..e99cb2ca8c 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/model/error.go @@ -2,6 +2,7 @@ package model import ( "bytes" + "errors" "fmt" "strings" ) @@ -31,7 +32,7 @@ func (v *validationError) Error() string { func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) string { for _, s := range seen { - if v == s { + if errors.Is(v, s) { return "" } } @@ -56,7 +57,9 @@ func (v *validationError) errorPrefix(prefix []rune, last bool, seen []error) st } else { subPrefix = append(subPrefix, []rune("├── ")...) } - if verr, ok := serr.(*validationError); ok { + + var verr *validationError + if errors.As(serr, &verr) { errMsg.WriteString(verr.errorPrefix(subPrefix, subLast, seen)) } else { errMsg.WriteString(fmt.Sprintf("%s%s\n", string(subPrefix), serr)) diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go index d570f93c3e..9b4e3ae858 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/model/model.go @@ -161,6 +161,7 @@ func (i *Icon) Validate() error { return result.orNil() } +// nolint:unused func (i *Icon) validateData() error { if !filetype.IsImage(i.Data) { return errors.New("icon data is not an image") diff --git a/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go b/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go index 6869b2e679..6fb792dda2 100644 --- a/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go +++ b/vendor/github.com/operator-framework/operator-registry/alpha/property/property.go @@ -7,8 +7,9 @@ import ( "fmt" "reflect" - "github.com/operator-framework/api/pkg/operators/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/operator-framework/api/pkg/operators/v1alpha1" ) type Property struct { @@ -177,6 +178,7 @@ func Deduplicate(in []Property) []Property { } props := map[key]Property{} + // nolint:prealloc var out []Property for _, p := range in { k := key{p.Type, string(p.Value)} diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go b/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go index 5c0cb603aa..50088ab4fd 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/api/api_to_model.go @@ -42,6 +42,7 @@ func ConvertAPIBundleToModelBundle(b *Bundle) (*model.Bundle, error) { } func 
convertAPIBundleToModelProperties(b *Bundle) ([]property.Property, error) { + // nolint:prealloc var out []property.Property providedGVKs := map[property.GVK]struct{}{} diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go b/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go index e7714713d2..b3368383fd 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/api/model_to_api.go @@ -5,10 +5,11 @@ import ( "encoding/json" "fmt" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/operator-framework/api/pkg/lib/version" "github.com/operator-framework/api/pkg/operators" "github.com/operator-framework/api/pkg/operators/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/operator-framework/operator-registry/alpha/model" "github.com/operator-framework/operator-registry/alpha/property" @@ -20,8 +21,8 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { return nil, fmt.Errorf("parse properties: %v", err) } - csvJson := b.CsvJSON - if csvJson == "" && len(props.CSVMetadatas) == 1 { + csvJSON := b.CsvJSON + if csvJSON == "" && len(props.CSVMetadatas) == 1 { var icons []v1alpha1.Icon if b.Package.Icon != nil { icons = []v1alpha1.Icon{{ @@ -37,7 +38,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { // attemptint to write to a nil map. StrategyName: "deployment", } - csv.Spec.Version = version.OperatorVersion{b.Version} + csv.Spec.Version = version.OperatorVersion{Version: b.Version} csv.Spec.RelatedImages = convertModelRelatedImagesToCSVRelatedImages(b.RelatedImages) if csv.Spec.Description == "" { csv.Spec.Description = b.Package.Description @@ -46,9 +47,9 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { if err != nil { return nil, err } - csvJson = string(csvData) + csvJSON = string(csvData) if len(b.Objects) == 0 { - b.Objects = []string{csvJson} + b.Objects = []string{csvJSON} } } @@ -76,7 +77,7 @@ func ConvertModelBundleToAPIBundle(b model.Bundle) (*Bundle, error) { Properties: convertModelPropertiesToAPIProperties(b.Properties), Replaces: b.Replaces, Skips: b.Skips, - CsvJson: csvJson, + CsvJson: csvJSON, Object: b.Objects, Deprecation: deprecation, }, nil @@ -127,6 +128,7 @@ func csvMetadataToCsv(m property.CSVMetadata) v1alpha1.ClusterServiceVersion { } func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -138,6 +140,7 @@ func gvksProvidedtoAPIGVKs(in []property.GVK) []*GroupVersionKind { return out } func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { + // nolint:prealloc var out []*GroupVersionKind for _, gvk := range in { out = append(out, &GroupVersionKind{ @@ -150,9 +153,9 @@ func gvksRequirestoAPIGVKs(in []property.GVKRequired) []*GroupVersionKind { } func convertModelPropertiesToAPIProperties(props []property.Property) []*Property { + // nolint:prealloc var out []*Property for _, prop := range props { - // NOTE: This is a special case filter to prevent problems with existing client implementations that // project bundle properties into CSV annotations and store those CSVs in a size-constrained // storage backend (e.g. etcd via kube-apiserver). 
If the bundle object property has data inlined @@ -172,6 +175,7 @@ func convertModelPropertiesToAPIProperties(props []property.Property) []*Propert } func convertModelPropertiesToAPIDependencies(props []property.Property) ([]*Dependency, error) { + // nolint:prealloc var out []*Dependency for _, prop := range props { switch prop.Type { @@ -196,6 +200,7 @@ func convertModelPropertiesToAPIDependencies(props []property.Property) ([]*Depe } func convertModelRelatedImagesToCSVRelatedImages(in []model.RelatedImage) []v1alpha1.RelatedImage { + // nolint:prealloc var out []v1alpha1.RelatedImage for _, ri := range in { out = append(out, v1alpha1.RelatedImage{ diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/client/client.go b/vendor/github.com/operator-framework/operator-registry/pkg/client/client.go index ed3637daef..c8fdaf19f2 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/client/client.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/client/client.go @@ -2,6 +2,7 @@ package client import ( "context" + "errors" "io" "time" @@ -49,7 +50,7 @@ func (it *BundleIterator) Next() *api.Bundle { return nil } next, err := it.stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { @@ -67,6 +68,7 @@ func (c *Client) GetBundle(ctx context.Context, packageName, channelName, csvNam } func (c *Client) GetBundleInPackageChannel(ctx context.Context, packageName, channelName string) (*api.Bundle, error) { + // nolint:staticcheck return c.Registry.GetBundleForChannel(ctx, &api.GetBundleInChannelRequest{PkgName: packageName, ChannelName: channelName}) } @@ -116,6 +118,7 @@ func (c *Client) HealthCheck(ctx context.Context, reconnectTimeout time.Duration } func NewClient(address string) (*Client, error) { + // nolint:staticcheck conn, err := grpc.Dial(address, grpc.WithInsecure()) if err != nil { return nil, err diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/client/errors.go b/vendor/github.com/operator-framework/operator-registry/pkg/client/errors.go index 948012c9fe..b9320501dc 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/client/errors.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/client/errors.go @@ -51,6 +51,7 @@ func IsErrorUnrecoverable(err error) bool { } func reasonForError(err error) HealthErrorReason { + // nolint:errorlint switch t := err.(type) { case HealthError: return t.Reason diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/client/kubeclient.go b/vendor/github.com/operator-framework/operator-registry/pkg/client/kubeclient.go index 17a6532f88..7b63e1c55d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/client/kubeclient.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/client/kubeclient.go @@ -10,13 +10,14 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -func NewKubeClient(kubeconfig string, logger *logrus.Logger) (clientset *kubernetes.Clientset, err error) { +func NewKubeClient(kubeconfig string, logger *logrus.Logger) (*kubernetes.Clientset, error) { var config *rest.Config if overrideConfig := os.Getenv(clientcmd.RecommendedConfigPathEnvVar); overrideConfig != "" { kubeconfig = overrideConfig } + var err error if kubeconfig != "" { logger.Infof("Loading kube client config from path %q", kubeconfig) config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) @@ -26,10 +27,11 @@ func NewKubeClient(kubeconfig string, logger *logrus.Logger) (clientset *kuberne } 
if err != nil { + // nolint:stylecheck err = fmt.Errorf("Cannot load config for REST client: %v", err) - return + return nil, err } - clientset, err = kubernetes.NewForConfig(config) - return + clientset, err := kubernetes.NewForConfig(config) + return clientset, err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go index 0c95407e2f..2b310371fa 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap.go @@ -32,10 +32,11 @@ type BundleLoader struct { // creates an operator registry Bundle object. // If the Data section has a PackageManifest resource then it is also // deserialized and included in the result. -func (l *BundleLoader) Load(cm *corev1.ConfigMap) (bundle *api.Bundle, err error) { +func (l *BundleLoader) Load(cm *corev1.ConfigMap) (*api.Bundle, error) { + var err error if cm == nil { err = errors.New("ConfigMap must not be <nil>") - return + return nil, err } logger := l.logger.WithFields(logrus.Fields{ @@ -45,15 +46,15 @@ func (l *BundleLoader) Load(cm *corev1.ConfigMap) (bundle *api.Bundle, err error bundle, skipped, bundleErr := loadBundle(logger, cm) if bundleErr != nil { err = fmt.Errorf("failed to extract bundle from configmap - %v", bundleErr) - return + return nil, err } l.logger.Debugf("couldn't unpack skipped: %#v", skipped) - return + return bundle, nil } -func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (bundle *api.Bundle, skipped map[string]string, err error) { - bundle = &api.Bundle{Object: []string{}} - skipped = map[string]string{} +func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (*api.Bundle, map[string]string, error) { + bundle := &api.Bundle{Object: []string{}} + skipped := map[string]string{} data := cm.Data if hasGzipEncodingAnnotation(cm) { @@ -95,7 +96,7 @@ func loadBundle(entry *logrus.Entry, cm *corev1.ConfigMap) (bundle *api.Bundle, logger.Infof("added to bundle, Kind=%s", resource.GetKind()) } - return + return bundle, skipped, nil } func decodeGzipBinaryData(cm *corev1.ConfigMap) (map[string]string, error) { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap_writer.go b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap_writer.go index dd8931cc9e..1ee4ff9173 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap_writer.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/configmap/configmap_writer.go @@ -139,7 +139,7 @@ func (c *ConfigMapWriter) Populate(maxDataSizeLimit uint64) error { logrus.WithFields(logrus.Fields{ "file.Name": file.Name(), "validConfigMapKey": validConfigMapKey, - }).Info("translated filename for configmap comptability") + }).Info("translated filename for configmap compatibility") } if c.gzip { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go index ea38c21d17..b2e51977a7 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/containertool.go @@ -8,7 +8,8 @@ const ( DockerTool ) -func (t ContainerTool) String() (s string) { +func (t ContainerTool) String() string { + var s
string switch t { case NoneTool: s = "none" @@ -17,7 +18,7 @@ func (t ContainerTool) String() (s string) { case DockerTool: s = "docker" } - return + return s } func (t ContainerTool) CommandFactory() CommandFactory { @@ -30,7 +31,8 @@ func (t ContainerTool) CommandFactory() CommandFactory { return &StubCommandFactory{} } -func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { +func NewContainerTool(s string, defaultTool ContainerTool) ContainerTool { + var t ContainerTool switch s { case "podman": t = PodmanTool @@ -41,16 +43,17 @@ func NewContainerTool(s string, defaultTool ContainerTool) (t ContainerTool) { default: t = defaultTool } - return + return t } // NewCommandContainerTool returns a tool that can be used in `exec` statements. -func NewCommandContainerTool(s string) (t ContainerTool) { +func NewCommandContainerTool(s string) ContainerTool { + var t ContainerTool switch s { case "docker": t = DockerTool default: t = PodmanTool } - return + return t } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go index 79059b9ee7..dd46ce22f5 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/dockerfilegenerator.go @@ -9,9 +9,11 @@ import ( const ( DefaultBinarySourceImage = "quay.io/operator-framework/opm:latest" - DefaultDbLocation = "/database/index.db" - DbLocationLabel = "operators.operatorframework.io.index.database.v1" - ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" + // nolint:stylecheck + DefaultDbLocation = "/database/index.db" + // nolint:stylecheck + DbLocationLabel = "operators.operatorframework.io.index.database.v1" + ConfigsLocationLabel = "operators.operatorframework.io.index.configs.v1" ) // DockerfileGenerator defines functions to generate index dockerfiles diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go index 57de738296..18ad46d981 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/labelreader.go @@ -71,5 +71,6 @@ func (r ImageLabelReader) GetLabelsFromImage(image string) (map[string]string, e return data[0].Labels, nil } + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse label data from container") } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go index 660c92c6a6..b5995b40f9 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/containertools/runner.go @@ -2,6 +2,7 @@ package containertools import ( + "errors" "fmt" "os/exec" "strings" @@ -83,13 +84,14 @@ func (r *ContainerCommandRunner) GetToolName() string { func (r *ContainerCommandRunner) Pull(image string) error { args := r.argsForCmd("pull", image) + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) 
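runner.go shells out to podman/docker for every operation, and the Unpack hunk just below swaps a bare type assertion for errors.As when one of those commands fails. A minimal, self-contained sketch of that pattern; the shell command is only a stand-in:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func run(name string, args ...string) error {
	// Output (unlike CombinedOutput) captures stderr into the *exec.ExitError.
	out, err := exec.Command(name, args...).Output()
	if err != nil {
		msg := err.Error()
		// errors.As also matches wrapped errors, which a direct
		// err.(*exec.ExitError) assertion would miss.
		var exitErr *exec.ExitError
		if errors.As(err, &exitErr) {
			msg = fmt.Sprintf("%s: %s", err, exitErr.Stderr)
		}
		return fmt.Errorf("error running %s: %s", name, msg)
	}
	fmt.Printf("output: %s", out)
	return nil
}

func main() {
	fmt.Println(run("sh", "-c", "echo boom >&2; exit 1"))
}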
r.logger.Infof("running %s", command.String()) out, err := command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error pulling image: %s. %v", string(out), err) } @@ -114,7 +116,7 @@ func (r *ContainerCommandRunner) Build(dockerfile, tag string) error { out, err := command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error building image: %s. %v", string(out), err) } @@ -125,6 +127,7 @@ func (r *ContainerCommandRunner) Build(dockerfile, tag string) error { func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { args := r.argsForCmd("create", image, "") + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s create", r.containerTool) @@ -133,7 +136,8 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err := command.Output() if err != nil { msg := err.Error() - if exitErr, ok := err.(*exec.ExitError); ok { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { msg = fmt.Sprintf("%s: %s", err, exitErr.Stderr) } return fmt.Errorf("error creating container %s: %s", string(out), msg) @@ -141,6 +145,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { id := strings.TrimSuffix(string(out), "\n") args = r.argsForCmd("cp", id+":"+src, dst) + // nolint:gosec command = exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s cp", r.containerTool) @@ -148,11 +153,12 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error copying container directory %s: %v", string(out), err) } args = r.argsForCmd("rm", id) + // nolint:gosec command = exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s rm", r.containerTool) @@ -160,7 +166,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { out, err = command.CombinedOutput() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return fmt.Errorf("error removing container %s: %v", string(out), err) } @@ -172,6 +178,7 @@ func (r *ContainerCommandRunner) Unpack(image, src, dst string) error { func (r *ContainerCommandRunner) Inspect(image string) ([]byte, error) { args := r.argsForCmd("inspect", image) + // nolint:gosec command := exec.Command(r.containerTool.String(), args...) r.logger.Infof("running %s inspect", r.containerTool) @@ -179,7 +186,7 @@ func (r *ContainerCommandRunner) Inspect(image string) ([]byte, error) { out, err := command.Output() if err != nil { - r.logger.Errorf(string(out)) + r.logger.Error(string(out)) return nil, err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go index d447dc1558..c045750e2d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/options.go @@ -60,28 +60,31 @@ func defaultConfig() *RegistryConfig { // NewRegistry returns a new containerd Registry and a function to destroy it after use. // The destroy function is safe to call more than once, but is a no-op after the first call. 
-func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { +func NewRegistry(options ...RegistryOption) (*Registry, error) { + var registry *Registry + config := defaultConfig() config.apply(options) - if err = config.complete(); err != nil { - return + if err := config.complete(); err != nil { + return nil, err } cs, err := contentlocal.NewStore(config.CacheDir) if err != nil { - return + return nil, err } var bdb *bolt.DB bdb, err = bolt.Open(config.DBPath, 0644, nil) if err != nil { - return + return nil, err } var once sync.Once + // nolint:nonamedreturns destroy := func() (destroyErr error) { once.Do(func() { - if destroyErr = bdb.Close(); destroyErr != nil { + if err := bdb.Close(); err != nil { return } if config.PreserveCache { @@ -102,12 +105,13 @@ func NewRegistry(options ...RegistryOption) (registry *Registry, err error) { resolverFunc: func(repo string) (remotes.Resolver, error) { return NewResolver(httpClient, config.ResolverConfigDir, config.PlainHTTP, repo) }, + // nolint: staticcheck platform: platforms.Ordered(platforms.DefaultSpec(), specs.Platform{ OS: "linux", Architecture: "amd64", }), } - return + return registry, nil } type RegistryOption func(config *RegistryConfig) @@ -168,12 +172,15 @@ func newClient(skipTlSVerify bool, roots *x509.CertPool) *http.Client { TLSClientConfig: &tls.Config{ InsecureSkipVerify: false, RootCAs: roots, + MinVersion: tls.VersionTLS12, }, } if skipTlSVerify { transport.TLSClientConfig = &tls.Config{ + // nolint:gosec InsecureSkipVerify: true, + MinVersion: tls.VersionTLS12, } } headers := http.Header{} diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go index 61fb5c73de..9c421dc682 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/containerdregistry/registry.go @@ -14,11 +14,11 @@ import ( "github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" + "github.com/containerd/errdefs" "github.com/containers/image/v5/docker/reference" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" @@ -34,7 +34,8 @@ type Registry struct { destroy func() error log *logrus.Entry resolverFunc func(repo string) (remotes.Resolver, error) - platform platforms.MatchComparer + // nolint:staticcheck + platform platforms.MatchComparer } var _ image.Registry = &Registry{} @@ -56,9 +57,26 @@ func (r *Registry) Pull(ctx context.Context, ref image.Reference) error { return err } - name, root, err := resolver.Resolve(ctx, ref.String()) - if err != nil { - return fmt.Errorf("error resolving name for image ref %s: %v", ref.String(), err) + retryBackoff := wait.Backoff{ + Duration: 1 * time.Second, + Factor: 1.0, + Jitter: 0.1, + Steps: 5, + } + + var name string + var root ocispec.Descriptor + if err := retry.OnError(retryBackoff, + func(pullErr error) bool { + r.log.Warnf("Error resolving registry %q: %v. 
Retrying", ref.String(), pullErr) + return true + }, + func() error { + name, root, err = resolver.Resolve(ctx, ref.String()) + return err + }, + ); err != nil { + return fmt.Errorf("error resolving remote name %s: %v", ref.String(), err) } r.log.Debugf("resolved name: %s", name) @@ -67,13 +85,6 @@ func (r *Registry) Pull(ctx context.Context, ref image.Reference) error { return err } - retryBackoff := wait.Backoff{ - Duration: 1 * time.Second, - Factor: 1.0, - Jitter: 0.1, - Steps: 5, - } - if err := retry.OnError(retryBackoff, func(pullErr error) bool { if nonRetriablePullError.MatchString(pullErr.Error()) { @@ -143,7 +154,7 @@ func (r *Registry) Labels(ctx context.Context, ref image.Reference) (map[string] } // Destroy cleans up the on-disk boltdb file and other cache files, unless preserve cache is true -func (r *Registry) Destroy() (err error) { +func (r *Registry) Destroy() error { return r.destroy() } @@ -263,6 +274,7 @@ const paxSchilyXattr = "SCHILY.xattr." // dropXattrs removes all xattrs from a Header. // This is useful for unpacking on systems where writing certain xattrs is a restricted operation; e.g. "security.capability" on SELinux. func dropXattrs(h *tar.Header) (bool, error) { + // nolint:staticcheck h.Xattrs = nil // Deprecated, but still in use, clear anyway. for key := range h.PAXRecords { if strings.HasPrefix(key, paxSchilyXattr) { // Xattrs are stored under keys with the "Schilly.xattr." prefix. diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go index 40769d23e7..0d299b66d8 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/execregistry/registry.go @@ -26,7 +26,7 @@ type Registry struct { var _ image.Registry = &Registry{} // NewRegistry instantiates and returns a new registry which manipulates images via exec podman/docker commands. 
-func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (registry *Registry, err error) { +func NewRegistry(tool containertools.ContainerTool, logger *logrus.Entry, opts ...containertools.RunnerOption) (*Registry, error) { return &Registry{ log: logger, cmd: containertools.NewCommandRunner(tool, logger, opts...), diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go b/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go index f46d58516f..1709a4a5d2 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/image/mock.go @@ -39,7 +39,7 @@ func (i *MockImage) unpack(dir string) error { if err := os.MkdirAll(pathDir, 0777); err != nil { return err } - return os.WriteFile(path, data, 0666) + return os.WriteFile(path, data, 0600) }) } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/build.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/build.go index 08b0fa808f..5bfb517fc5 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/build.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/build.go @@ -31,6 +31,7 @@ func ExecuteCommand(cmd *exec.Cmd) error { log.Debugf("Running %#v", cmd.Args) if err := cmd.Run(); err != nil { + // nolint:stylecheck return fmt.Errorf("Failed to exec %#v: %v", cmd.Args, err) } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/errors.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/errors.go index 5e0735adf7..869cf061c3 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/errors.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/errors.go @@ -12,6 +12,7 @@ type ValidationError struct { } func (v ValidationError) Error() string { + // nolint:prealloc var errs []string for _, err := range v.Errors { errs = append(errs, err.Error()) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/exporter.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/exporter.go index 043aac1ac5..49f50dc237 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/exporter.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/exporter.go @@ -30,7 +30,6 @@ func NewExporterForBundle(image, directory string, containerTool containertools. } func (i *BundleExporter) Export(skipTLSVerify, plainHTTP bool) error { - log := logrus.WithField("img", i.image) tmpDir, err := os.MkdirTemp("./", "bundle_tmp") diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/generate.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/generate.go index 7f20e42888..72e781e0b1 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/generate.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/generate.go @@ -79,6 +79,7 @@ func GenerateFunc(directory, outputDir, packageName, channels, channelDefault st // Channels and packageName are required fields where as default channel is automatically filled if unspecified // and that either of the required field is missing. We are interpreting the bundle information through // bundle directory embedded in the package folder. 
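The errors.go hunk above shows ValidationError flattening its collected errors into strings; how they are joined falls outside the hunk, so the newline separator below is an assumption. A compact sketch of the aggregate-error shape, sized up front so the prealloc linter has nothing to flag:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// ValidationError collects many errors behind one error value,
// mirroring the shape in pkg/lib/bundle/errors.go.
type ValidationError struct {
	Errors []error
}

func (v ValidationError) Error() string {
	msgs := make([]string, 0, len(v.Errors))
	for _, err := range v.Errors {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "\n") // separator is assumed, not from the hunk
}

func main() {
	err := ValidationError{Errors: []error{
		errors.New("missing manifests directory"),
		errors.New("missing metadata directory"),
	}}
	fmt.Println(err.Error())
}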
+ // nolint:nestif if channels == "" || packageName == "" { var notProvided []string if channels == "" { @@ -155,9 +156,10 @@ func GenerateFunc(directory, outputDir, packageName, channels, channelDefault st // CopyYamlOutput takes the generated annotations yaml and writes it to disk. // If an outputDir is specified, it will copy the input manifests // It returns two strings. resultMetadata is the path to the output metadata/ folder. -// resultManifests is the path to the output manifests/ folder -- if no copy occured, +// resultManifests is the path to the output manifests/ folder -- if no copy occurred, // it just returns the input manifestDir -func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDir string, overwrite bool) (resultManifests, resultMetadata string, err error) { +func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDir string, overwrite bool) (string, string, error) { + var resultManifests, resultMetadata string // First, determine the parent directory of the metadata and manifest directories copyDir := "" @@ -204,6 +206,7 @@ func CopyYamlOutput(annotationsContent []byte, manifestDir, outputDir, workingDi // Currently able to detect helm chart, registry+v1 (CSV) and plain k8s resources // such as CRD. func GetMediaType(directory string) (string, error) { + // nolint:prealloc var files []string k8sFiles := make(map[string]*unstructured.Unstructured) @@ -219,6 +222,7 @@ func GetMediaType(directory string) (string, error) { fileWithPath := filepath.Join(directory, item.Name()) fileBlob, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck return "", fmt.Errorf("Unable to read file %s in bundle", fileWithPath) } @@ -230,6 +234,7 @@ func GetMediaType(directory string) (string, error) { } if len(files) == 0 { + // nolint:stylecheck return "", fmt.Errorf("The directory %s contains no yaml files", directory) } @@ -276,11 +281,13 @@ func ValidateAnnotations(existing, expected []byte) error { for label, item := range expectedAnnotations.Annotations { value, hasAnnotation := fileAnnotations.Annotations[label] if !hasAnnotation { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Missing field: %s", label)) continue } if item != value { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Expect field %q to have value %q instead of %q", label, item, value)) } @@ -443,6 +450,7 @@ func copyManifestDir(from, to string, overwrite bool) error { return nil } +// nolint:unused func containsString(slice []string, s string) bool { for _, item := range slice { if item == s { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/interpreter.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/interpreter.go index d523b82eb6..f3efaeea8f 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/interpreter.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/interpreter.go @@ -32,9 +32,10 @@ func NewBundleDirInterperter(bundleDir string) (*bundleDirInterpreter, error) { return &bundleDirInterpreter{bundleCsvName: csv.GetName(), pkg: p}, nil } -func (b *bundleDirInterpreter) GetBundleChannels() (channelNames []string) { +func (b *bundleDirInterpreter) GetBundleChannels() []string { + var channelNames []string for channelName, channel := range b.pkg.Channels { - for bundle, _ := range channel.Nodes { + for bundle := range channel.Nodes { if bundle.CsvName == b.bundleCsvName { channelNames = append(channelNames, channelName) 
break @@ -42,7 +43,7 @@ func (b *bundleDirInterpreter) GetBundleChannels() (channelNames []string) { } } sort.Strings(channelNames) - return + return channelNames } func (b *bundleDirInterpreter) GetDefaultChannel() string { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go index 66e29dffc0..2ed9266039 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/bundle/validate.go @@ -22,6 +22,7 @@ import ( "github.com/operator-framework/api/pkg/manifests" v1 "github.com/operator-framework/api/pkg/operators/v1alpha1" v "github.com/operator-framework/api/pkg/validation" + "github.com/operator-framework/operator-registry/pkg/image" validation "github.com/operator-framework/operator-registry/pkg/lib/validation" "github.com/operator-framework/operator-registry/pkg/registry" @@ -99,10 +100,12 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } } - if manifestsFound == false { + if !manifestsFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate manifests directory")) } - if metadataFound == false { + if !metadataFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to locate metadata directory")) } @@ -144,6 +147,7 @@ func (i imageValidator) ValidateBundleFormat(directory string) error { } if !annotationsFound { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Could not find annotations file")) } else { i.logger.Debug("Found annotations file") @@ -185,6 +189,7 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) for label, item := range annotations { val, ok := fileAnnotations.Annotations[label] if !ok && label != ChannelDefaultLabel { + // nolint:stylecheck aErr := fmt.Errorf("Missing annotation %q", label) validationErrors = append(validationErrors, aErr) } @@ -192,26 +197,31 @@ func validateAnnotations(mediaType string, fileAnnotations *AnnotationMetadata) switch label { case MediatypeLabel: if item != val { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, item, val) validationErrors = append(validationErrors, aErr) } case ManifestsLabel: if item != ManifestsDir { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, ManifestsDir, val) validationErrors = append(validationErrors, aErr) } case MetadataDir: if item != MetadataLabel { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have value %q instead of %q", label, MetadataDir, val) validationErrors = append(validationErrors, aErr) } case ChannelsLabel: if val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } case ChannelDefaultLabel: if ok && val == "" { + // nolint:stylecheck aErr := fmt.Errorf("Expecting annotation %q to have non-empty value", label) validationErrors = append(validationErrors, aErr) } @@ -291,6 +301,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { fileWithPath := filepath.Join(manifestDir, item.Name()) data, err := os.ReadFile(fileWithPath) if err != nil { + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unable to read file %s 
in supported types", fileWithPath)) continue } @@ -313,6 +324,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { continue } + // nolint:nestif if gvk.Kind == CSVKind { err := runtime.DefaultUnstructuredConverter.FromUnstructured(k8sFile.Object, csv) if err != nil { @@ -361,6 +373,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { } } default: + // nolint:stylecheck validationErrors = append(validationErrors, fmt.Errorf("Unsupported api version of CRD: %s", gv)) } } else { @@ -390,6 +403,7 @@ func (i imageValidator) ValidateBundleContent(manifestDir string) error { if _, ok := optionalValidators[validateOperatorHubKey]; ok { i.logger.Debug("Performing operatorhub validation") bundle := &manifests.Bundle{Name: csvName, CSV: csv} + // nolint:staticcheck results := v.OperatorHubValidator.Validate(bundle) if len(results) > 0 { for _, err := range results[0].Errors { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go index 6875566d08..60721cdaf6 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/semver/semver.go @@ -8,6 +8,7 @@ import ( // BuildIdCompare compares two versions and returns negative one if the first arg is less than the second arg, positive one if it is larger, and zero if they are equal. // This comparison follows typical semver precedence rules, with one addition: whenever two versions are equal with the exception of their build-ids, the build-ids are compared using prerelease precedence rules. Further, versions with no build-id are always less than versions with build-ids; e.g. 1.0.0 < 1.0.0+1. 
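A quick worked example of the precedence rules described above, using the same github.com/blang/semver/v4 module; stock semver ignores build metadata, which is exactly the gap BuildIdCompare fills:

package main

import (
	"fmt"

	"github.com/blang/semver/v4"
)

func main() {
	plain := semver.MustParse("1.0.0")
	build := semver.MustParse("1.0.0+1")
	pre := semver.MustParse("1.0.0-1")

	// Stock semver treats build metadata as insignificant: this tie (0)
	// is what BuildIdCompare breaks, comparing build-ids with prerelease
	// precedence rules plus its extra rule that 1.0.0 < 1.0.0+1.
	fmt.Println(plain.Compare(build)) // 0

	// Prerelease identifiers, by contrast, lower precedence.
	fmt.Println(pre.Compare(plain)) // -1
}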
+// nolint:stylecheck func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { if c := b.Compare(v); c != 0 { return c, nil @@ -27,6 +28,7 @@ func BuildIdCompare(b semver.Version, v semver.Version) (int, error) { } func buildAsPrerelease(v semver.Version) (*semver.Version, error) { + // nolint:prealloc var pre []semver.PRVersion for _, b := range v.Build { p, err := semver.NewPRVersion(b) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go b/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go index d8f6d5b8e5..a88b7b6302 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/lib/validation/bundle.go @@ -8,12 +8,14 @@ import ( "github.com/operator-framework/api/pkg/validation/errors" interfaces "github.com/operator-framework/api/pkg/validation/interfaces" + "github.com/operator-framework/operator-registry/pkg/registry" ) var RegistryBundleValidator interfaces.Validator = interfaces.ValidatorFunc(validateBundles) -func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { +func validateBundles(objs ...interface{}) []errors.ManifestResult { + var results []errors.ManifestResult for _, obj := range objs { switch v := obj.(type) { case *registry.Bundle: @@ -23,7 +25,8 @@ func validateBundles(objs ...interface{}) (results []errors.ManifestResult) { return results } -func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { +func validateBundle(bundle *registry.Bundle) errors.ManifestResult { + var result errors.ManifestResult csv, err := bundle.ClusterServiceVersion() if err != nil { result.Add(errors.ErrInvalidParse("error getting bundle CSV", err)) @@ -39,7 +42,8 @@ func validateBundle(bundle *registry.Bundle) (result errors.ManifestResult) { return result } -func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) (result errors.ManifestResult) { +func validateOwnedCRDs(bundle *registry.Bundle, csv *registry.ClusterServiceVersion) errors.ManifestResult { + var result errors.ManifestResult ownedKeys, _, err := csv.GetCustomResourceDefintions() if err != nil { result.Add(errors.ErrInvalidParse("error getting CSV CRDs", err)) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go b/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go index 2f740151a3..788428440c 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/prettyunmarshaler/prettyunmarshaler.go @@ -8,29 +8,29 @@ import ( "strings" ) -type JsonUnmarshalError struct { +type JSONUnmarshalError struct { data []byte offset int64 err error } -func NewJSONUnmarshalError(data []byte, err error) *JsonUnmarshalError { +func NewJSONUnmarshalError(data []byte, err error) *JSONUnmarshalError { var te *json.UnmarshalTypeError if errors.As(err, &te) { - return &JsonUnmarshalError{data: data, offset: te.Offset, err: te} + return &JSONUnmarshalError{data: data, offset: te.Offset, err: te} } var se *json.SyntaxError if errors.As(err, &se) { - return &JsonUnmarshalError{data: data, offset: se.Offset, err: se} + return &JSONUnmarshalError{data: data, offset: se.Offset, err: se} } - return &JsonUnmarshalError{data: data, offset: -1, err: err} + return &JSONUnmarshalError{data: 
data, offset: -1, err: err} } -func (e *JsonUnmarshalError) Error() string { +func (e *JSONUnmarshalError) Error() string { return e.err.Error() } -func (e *JsonUnmarshalError) Pretty() string { +func (e *JSONUnmarshalError) Pretty() string { if len(e.data) == 0 || e.offset < 0 || e.offset > int64(len(e.data)) { return e.err.Error() } @@ -82,7 +82,6 @@ func (e *JsonUnmarshalError) Pretty() string { // We found the byte in the pretty data that matches the byte in the original data, // so increment the pretty index. pIndex++ - } _, _ = sb.Write(pretty[:pOffset]) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go index b5fb28b941..8b3be74b0e 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundle.go @@ -7,7 +7,6 @@ import ( apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -54,7 +53,8 @@ type Bundle struct { func NewBundle(name string, annotations *Annotations, objs ...*unstructured.Unstructured) *Bundle { bundle := &Bundle{ - Name: name, + Name: name, + // nolint:staticcheck Package: annotations.PackageName, Annotations: annotations, } @@ -62,6 +62,7 @@ func NewBundle(name string, annotations *Annotations, objs ...*unstructured.Unst bundle.Add(o) } + // nolint:staticcheck if annotations == nil { return bundle } @@ -168,6 +169,7 @@ func (b *Bundle) CustomResourceDefinitions() ([]runtime.Object, error) { if err := b.cache(); err != nil { return nil, err } + // nolint:prealloc var crds []runtime.Object for _, crd := range b.v1crds { crds = append(crds, crd) @@ -235,7 +237,6 @@ func (b *Bundle) RequiredAPIs() (map[APIKey]struct{}, error) { return nil, fmt.Errorf("couldn't parse plural.group from crd name: %s", api.Name) } required[APIKey{parts[1], api.Version, api.Kind, parts[0]}] = struct{}{} - } _, requiredAPIs, err := csv.GetApiServiceDefinitions() if err != nil { @@ -278,10 +279,18 @@ func (b *Bundle) AllProvidedAPIsInBundle() error { return nil } -func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +// (csvName, bundleImage string, csvBytes []byte, bundleBytes []byte, annotationBytes []byte, err error) { +func (b *Bundle) Serialize() (string, string, []byte, []byte, []byte, error) { + var bundleBytes []byte + var csvName string + var csvBytes []byte + var annotationBytes []byte + var err error + csvCount := 0 for _, obj := range b.Objects { - objBytes, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj) + var objBytes []byte + objBytes, err = runtime.Encode(unstructured.UnstructuredJSONScheme, obj) if err != nil { return "", "", nil, nil, nil, err } @@ -301,7 +310,7 @@ func (b *Bundle) Serialize() (csvName, bundleImage string, csvBytes []byte, bund } if b.Annotations != nil { - annotationBytes, err = json.Marshal(b.Annotations) + annotationBytes, _ = json.Marshal(b.Annotations) } return csvName, b.BundleImage, csvBytes, bundleBytes, annotationBytes, nil diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go 
b/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go index e8664c4e84..2854003a26 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/bundlegraphloader.go @@ -16,6 +16,7 @@ type BundleGraphLoader struct { func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, annotations *AnnotationsFile, skippatch bool) (*Package, error) { bundleVersion, err := bundle.Version() if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to extract bundle version from bundle %s, can't insert in semver mode", bundle.BundleImage) } @@ -43,6 +44,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann if graph.DefaultChannel == "" { // Infer default channel from channel list if annotations.SelectDefaultChannel() == "" { + // nolint:stylecheck return nil, fmt.Errorf("Default channel is missing and can't be inferred") } graph.DefaultChannel = annotations.SelectDefaultChannel() @@ -83,6 +85,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann for node := range channelGraph.Nodes { nodeVersion, err := semver.Make(node.Version) if err != nil { + // nolint:stylecheck return nil, fmt.Errorf("Unable to parse existing bundle version stored in index %s %s %s", node.CsvName, node.Version, node.BundlePath) } @@ -131,7 +134,7 @@ func (g *BundleGraphLoader) AddBundleToGraph(bundle *Bundle, graph *Package, ann // the new channel head if !lowestAhead.IsEmpty() { channelGraph.Nodes[lowestAhead] = map[BundleKey]struct{}{ - newBundleKey: struct{}{}, + newBundleKey: {}, } } else { channelGraph.Head = newBundleKey diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go index 85f5acb40e..d45bd414e1 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/channelupdateoptions.go @@ -22,6 +22,7 @@ func GetModeFromString(mode string) (Mode, error) { case "semver-skippatch": return SkipPatchMode, nil default: + // nolint:stylecheck return -1, fmt.Errorf("Invalid channel update mode %s specified", mode) } } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go index 8dcdf65adb..4a3d8ceaf7 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/csv.go @@ -7,19 +7,20 @@ import ( "os" "path" - prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" - - v1 "k8s.io/api/apps/v1" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/yaml" "github.com/operator-framework/api/pkg/operators" + + prettyunmarshaler "github.com/operator-framework/operator-registry/pkg/prettyunmarshaler" ) const ( // Name of the CSV's kind + // nolint:unused clusterServiceVersionKind = "ClusterServiceVersion" // Name of the section under which the list of owned and required list of @@ -44,9 +45,11 @@ const ( icon = "icon" // The yaml attribute that points to the 
icon.base64data for the ClusterServiceVersion + // nolint:unused base64data = "base64data" // The yaml attribute that points to the icon.mediatype for the ClusterServiceVersion + // nolint:unused mediatype = "mediatype" // The yaml attribute that points to the description for the ClusterServiceVersion description = "description" @@ -131,7 +134,6 @@ func ReadCSVFromBundleDirectory(bundleDir string) (*ClusterServiceVersion, error return &csv, nil } return nil, fmt.Errorf("no ClusterServiceVersion object found in %s", bundleDir) - } // GetReplaces returns the name of the older ClusterServiceVersion object that @@ -224,16 +226,16 @@ func (csv *ClusterServiceVersion) GetSkips() ([]string, error) { // // If owned or required is not defined in the spec then an empty list is // returned respectively. -func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*DefinitionKey, required []*DefinitionKey, err error) { +func (csv *ClusterServiceVersion) GetCustomResourceDefintions() ([]*DefinitionKey, []*DefinitionKey, error) { var objmap map[string]*json.RawMessage - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, nil, err } rawValue, ok := objmap[customResourceDefinitions] if !ok || rawValue == nil { - return + return nil, nil, nil } var definitions struct { @@ -241,13 +243,11 @@ func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*Defini Required []*DefinitionKey `json:"required"` } - if err = json.Unmarshal(*rawValue, &definitions); err != nil { - return + if err := json.Unmarshal(*rawValue, &definitions); err != nil { + return nil, nil, err } - owned = definitions.Owned - required = definitions.Required - return + return definitions.Owned, definitions.Required, nil } // GetApiServiceDefinitions returns a list of owned and required @@ -261,16 +261,17 @@ func (csv *ClusterServiceVersion) GetCustomResourceDefintions() (owned []*Defini // // If owned or required is not defined in the spec then an empty list is // returned respectively. 
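Both CRD getters in this file use the same two-stage decode: unmarshal the spec into a map of json.RawMessage, then decode only the section of interest. A standalone sketch with a made-up spec and a reduced DefinitionKey (the real struct carries more fields):

package main

import (
	"encoding/json"
	"fmt"
)

// DefinitionKey is trimmed to two fields for illustration.
type DefinitionKey struct {
	Name string `json:"name"`
	Kind string `json:"kind"`
}

func main() {
	spec := []byte(`{"customresourcedefinitions": {"owned": [{"name": "examples.test.example.com", "kind": "Example"}]}}`)

	// Stage 1: split the document into raw, undecoded sections.
	var objmap map[string]*json.RawMessage
	if err := json.Unmarshal(spec, &objmap); err != nil {
		panic(err)
	}

	// Stage 2: decode only the section we need. A missing or null
	// section yields empty lists, matching the doc comment above.
	rawValue, ok := objmap["customresourcedefinitions"]
	if !ok || rawValue == nil {
		fmt.Println("no CRDs declared")
		return
	}
	var definitions struct {
		Owned    []*DefinitionKey `json:"owned"`
		Required []*DefinitionKey `json:"required"`
	}
	if err := json.Unmarshal(*rawValue, &definitions); err != nil {
		panic(err)
	}
	fmt.Println(len(definitions.Owned), len(definitions.Required)) // 1 0
}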
-func (csv *ClusterServiceVersion) GetApiServiceDefinitions() (owned []*DefinitionKey, required []*DefinitionKey, err error) { +// nolint:stylecheck +func (csv *ClusterServiceVersion) GetApiServiceDefinitions() ([]*DefinitionKey, []*DefinitionKey, error) { var objmap map[string]*json.RawMessage - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { return nil, nil, fmt.Errorf("error unmarshaling into object map: %s", err) } rawValue, ok := objmap[apiServiceDefinitions] if !ok || rawValue == nil { - return + return nil, nil, nil } var definitions struct { @@ -278,27 +279,25 @@ func (csv *ClusterServiceVersion) GetApiServiceDefinitions() (owned []*Definitio Required []*DefinitionKey `json:"required"` } - if err = json.Unmarshal(*rawValue, &definitions); err != nil { - return + if err := json.Unmarshal(*rawValue, &definitions); err != nil { + return nil, nil, err } - owned = definitions.Owned - required = definitions.Required - return + return definitions.Owned, definitions.Required, nil } // GetRelatedImage returns the list of associated images for the operator -func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct{}, err error) { +func (csv *ClusterServiceVersion) GetRelatedImages() (map[string]struct{}, error) { var objmap map[string]*json.RawMessage - imageSet = make(map[string]struct{}) + imageSet := make(map[string]struct{}) - if err = json.Unmarshal(csv.Spec, &objmap); err != nil { - return + if err := json.Unmarshal(csv.Spec, &objmap); err != nil { + return nil, err } rawValue, ok := objmap[relatedImages] if !ok || rawValue == nil { - return + return imageSet, nil } type relatedImage struct { @@ -306,15 +305,15 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct Ref string `json:"image"` } var relatedImages []relatedImage - if err = json.Unmarshal(*rawValue, &relatedImages); err != nil { - return + if err := json.Unmarshal(*rawValue, &relatedImages); err != nil { + return nil, err } for _, img := range relatedImages { imageSet[img.Ref] = struct{}{} } - return + return imageSet, nil } // GetOperatorImages returns a list of any images used to run the operator. @@ -322,7 +321,7 @@ func (csv *ClusterServiceVersion) GetRelatedImages() (imageSet map[string]struct func (csv *ClusterServiceVersion) GetOperatorImages() (map[string]struct{}, error) { type dep struct { Name string - Spec v1.DeploymentSpec + Spec appsv1.DeploymentSpec } type strategySpec struct { Deployments []dep @@ -416,7 +415,6 @@ func (csv *ClusterServiceVersion) GetSubstitutesFor() string { } func (csv *ClusterServiceVersion) UnmarshalJSON(data []byte) error { - if err := csv.UnmarshalSpec(data); err != nil { return err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go index 0a9587d092..1818cc3055 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/decode.go @@ -13,36 +13,34 @@ import ( // DecodeUnstructured decodes a raw stream into a an // unstructured.Unstructured instance. 
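The decode.go hunk below applies the patch's most common refactor: named results with naked returns become explicit return values. A before/after sketch of why the explicit form is easier to audit:

package main

import (
	"errors"
	"fmt"
)

// Before: named results plus naked returns. If a later edit forgets to
// assign obj (or err), the function silently returns zero values, and
// the reader must scan the whole body to know what comes back.
func parseNamed(s string) (obj string, err error) {
	if s == "" {
		err = errors.New("empty input")
		return
	}
	obj = "parsed:" + s
	return
}

// After: every exit spells out exactly what it returns.
func parseExplicit(s string) (string, error) {
	if s == "" {
		return "", errors.New("empty input")
	}
	return "parsed:" + s, nil
}

func main() {
	fmt.Println(parseNamed("a"))
	fmt.Println(parseExplicit("a"))
}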
-func DecodeUnstructured(reader io.Reader) (obj *unstructured.Unstructured, err error) { +func DecodeUnstructured(reader io.Reader) (*unstructured.Unstructured, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) t := &unstructured.Unstructured{} - if err = decoder.Decode(t); err != nil { - return + if err := decoder.Decode(t); err != nil { + return nil, err } - obj = t - return + return t, nil } // DecodePackageManifest decodes a raw stream into a a PackageManifest instance. // If a package name is empty we consider the object invalid! -func DecodePackageManifest(reader io.Reader) (manifest *PackageManifest, err error) { +func DecodePackageManifest(reader io.Reader) (*PackageManifest, error) { decoder := yaml.NewYAMLOrJSONDecoder(reader, 30) obj := &PackageManifest{} if decodeErr := decoder.Decode(obj); decodeErr != nil { - err = fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) - return + err := fmt.Errorf("could not decode contents into package manifest - %v", decodeErr) + return nil, err } if obj.PackageName == "" { - err = errors.New("name of package (packageName) is missing") - return + err := errors.New("name of package (packageName) is missing") + return nil, err } - manifest = obj - return + return obj, nil } func decodeFileFS(root fs.FS, path string, into interface{}, log *logrus.Entry) error { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go index a899f01e07..4b72091882 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/directoryGraphLoader.go @@ -51,7 +51,7 @@ func NewPackageGraphLoaderFromDir(packageDir string) (*DirGraphLoader, error) { func (g *DirGraphLoader) Generate() (*Package, error) { err := g.loadBundleCsvPathMap() if err != nil { - return nil, fmt.Errorf("error geting CSVs from bundles in the package directory, %v", err) + return nil, fmt.Errorf("error getting CSVs from bundles in the package directory, %v", err) } pkg, err := g.parsePackageYAMLFile() @@ -76,6 +76,7 @@ func (g *DirGraphLoader) loadBundleCsvPathMap() error { } CsvNameAndReplaceMap := make(map[string]csvReplaces) for _, bundlePath := range bundleDirs { + //nolint:nestif if bundlePath.IsDir() { csvStruct, err := ReadCSVFromBundleDirectory(filepath.Join(g.PackageDir, bundlePath.Name())) if err != nil { @@ -131,7 +132,7 @@ func (g *DirGraphLoader) getChannelNodes(channelHeadCsv string) *map[BundleKey]m // Iterate through remainingCSVsInChannel and add replaces of each encountered CSVs if not already in nodes. // Loop only exit after all remaining csvs are visited/deleted. 
for len(remainingCSVsInChannel) > 0 { - for bk, _ := range remainingCSVsInChannel { + for bk := range remainingCSVsInChannel { if _, ok := nodes[BundleKey{CsvName: bk.CsvName}]; !ok { nodes[BundleKey{CsvName: bk.CsvName}] = func() map[BundleKey]struct{} { subNode := make(map[BundleKey]struct{}) @@ -203,5 +204,4 @@ func convertFromPackageManifest(pkgManifest PackageManifest) *Package { DefaultChannel: pkgManifest.GetDefaultChannel(), Channels: pkgChannels, } - } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go index 936f39ccab..dc34f06dc7 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/empty.go @@ -40,7 +40,7 @@ func (EmptyQuery) GetBundleForChannel(ctx context.Context, pkgName string, chann return nil, errors.New("empty querier: cannot get bundle for channel") } -func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get channel entries that replace") } @@ -48,11 +48,11 @@ func (EmptyQuery) GetBundleThatReplaces(ctx context.Context, name, pkgName, chan return nil, errors.New("empty querier: cannot get bundle that replaces") } -func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get channel entries that provide") } -func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*ChannelEntry, err error) { +func (EmptyQuery) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*ChannelEntry, error) { return nil, errors.New("empty querier: cannot get latest channel entries that provide") } @@ -68,7 +68,8 @@ func (EmptyQuery) GetImagesForBundle(ctx context.Context, bundleName string) ([] return nil, errors.New("empty querier: cannot get image list") } -func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +// nolint:stylecheck +func (EmptyQuery) GetApisForEntry(ctx context.Context, entryId int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { return nil, nil, errors.New("empty querier: cannot apis") } @@ -104,11 +105,11 @@ func (EmptyQuery) SendBundles(ctx context.Context, stream BundleSender) error { return errors.New("empty querier: cannot stream bundles") } -func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (EmptyQuery) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { return nil, errors.New("empty querier: cannot get dependencies for bundle") } -func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (bundlePath string, err error) { +func (EmptyQuery) GetBundlePathIfExists(ctx context.Context, csvName string) (string, error) { return "", errors.New("empty querier: cannot get bundle path for bundle") } diff --git 
a/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go index 32185f1894..d2623f2a68 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/graph.go @@ -35,7 +35,7 @@ type Channel struct { func (c *Channel) String() string { var b strings.Builder - for node, _ := range c.Nodes { + for node := range c.Nodes { b.WriteString(node.String()) b.WriteString("\n") } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go index 69fe210ef9..ed287e6872 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/imageinput.go @@ -4,8 +4,9 @@ import ( "os" "path/filepath" - "github.com/operator-framework/operator-registry/pkg/image" "github.com/sirupsen/logrus" + + "github.com/operator-framework/operator-registry/pkg/image" ) type ImageInput struct { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go index 4b13ef7673..24445ffe08 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/parse.go @@ -6,9 +6,10 @@ import ( "io/fs" "strings" - operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ) type bundleParser struct { @@ -156,6 +157,7 @@ func (b *bundleParser) addMetadata(metadata fs.FS, bundle *Bundle) error { bundle.Package = af.Annotations.PackageName bundle.Channels = af.GetChannels() } else { + // nolint:stylecheck return fmt.Errorf("Could not find annotations file") } @@ -184,6 +186,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { return nil, fmt.Errorf("bundle missing csv") } + // nolint:prealloc var derived []Property if len(csv.GetAnnotations()) > 0 { properties, ok := csv.GetAnnotations()[PropertyKey] @@ -235,6 +238,7 @@ func (b *bundleParser) derivedProperties(bundle *Bundle) ([]Property, error) { // propertySet returns the deduplicated set of a property list. 
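propertySet, whose doc comment sits just above, is the usual order-preserving dedup: a visited map plus an append-only slice, which is also why the prealloc linter is silenced there (the output length is unknown up front). A generic sketch; the type parameters are mine, not the package's:

package main

import "fmt"

// dedup keeps the first occurrence of each key and preserves input
// order; propertySet does the same with a property's type and value
// as the key.
func dedup[T any, K comparable](in []T, key func(T) K) []T {
	visited := map[K]struct{}{}
	var out []T // not preallocated: duplicates make the final length unknown
	for _, v := range in {
		k := key(v)
		if _, ok := visited[k]; ok {
			continue
		}
		visited[k] = struct{}{}
		out = append(out, v)
	}
	return out
}

func main() {
	props := []string{"gvk:a", "package:b", "gvk:a"}
	fmt.Println(dedup(props, func(s string) string { return s })) // [gvk:a package:b]
}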
func propertySet(properties []Property) []Property { + // nolint:prealloc var ( set []Property visited = map[string]struct{}{} diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go index 730d27fb9b..ea86a163e5 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/populator.go @@ -151,6 +151,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) // globalSanityCheck should have verified this to be a head without anything replacing it // and that we have a single overwrite per package + // nolint:nestif if len(i.overwrittenImages) > 0 { if overwriter, ok := i.loader.(HeadOverwriter); ok { // Assume loader has some way to handle overwritten heads if HeadOverwriter isn't implemented explicitly @@ -180,6 +181,7 @@ func (i *DirectoryPopulator) loadManifests(imagesToAdd []*ImageInput, mode Mode) } } default: + // nolint:stylecheck return fmt.Errorf("Unsupported update mode") } @@ -195,6 +197,7 @@ var packageContextKey = "package" // ContextWithPackage adds a package value to a context. func ContextWithPackage(ctx context.Context, pkg string) context.Context { + // nolint:staticcheck return context.WithValue(ctx, packageContextKey, pkg) } @@ -262,6 +265,7 @@ func (i *DirectoryPopulator) loadManifestsSemver(bundle *Bundle, skippatch bool) } // loadOperatorBundle adds the package information to the loader's store +// nolint:unused func (i *DirectoryPopulator) loadOperatorBundle(manifest PackageManifest, bundle *Bundle) error { if manifest.PackageName == "" { return nil diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go index 0ba64c72dd..947814751d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/registry_to_model.go @@ -47,7 +47,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e if err := json.Unmarshal(p.Value, &v); err != nil { return nil, nil, property.ParseError{Idx: i, Typ: p.Type, Err: err} } - k := property.GVKRequired{Group: v.Group, Kind: v.Kind, Version: v.Version} + k := property.GVKRequired(v) requiredGVKs[k] = struct{}{} case property.TypePackage: var v property.Package @@ -90,6 +90,7 @@ func ObjectsAndPropertiesFromBundle(b *Bundle) ([]string, []property.Property, e } } + // nolint:prealloc var ( props []property.Property objects []string diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go b/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go index 3a5ab62936..4105aaa3d6 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/registry/types.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/blang/semver/v4" + "github.com/operator-framework/api/pkg/constraints" ) @@ -285,6 +286,7 @@ func (gd *GVKDependency) Validate() []error { func (ld *LabelDependency) Validate() []error { errs := []error{} if *ld == (LabelDependency{}) { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Label information is missing")) } return errs @@ -294,13 +296,16 @@ func (ld 
*LabelDependency) Validate() []error { func (pd *PackageDependency) Validate() []error { errs := []error{} if pd.PackageName == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package name is empty")) } if pd.Version == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Package version is empty")) } else { _, err := semver.ParseRange(pd.Version) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid semver format version")) } } @@ -311,15 +316,18 @@ func (pd *PackageDependency) Validate() []error { func (cc *CelConstraint) Validate() []error { errs := []error{} if cc.Cel == nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL field is missing")) } else { if cc.Cel.Rule == "" { + // nolint:stylecheck errs = append(errs, fmt.Errorf("The CEL expression is missing")) return errs } validator := constraints.NewCelEnvironment() _, err := validator.Validate(cc.Cel.Rule) if err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid CEL expression: %s", err.Error())) } } @@ -328,6 +336,7 @@ func (cc *CelConstraint) Validate() []error { // GetDependencies returns the list of dependency func (d *DependenciesFile) GetDependencies() []*Dependency { + // nolint:prealloc var dependencies []*Dependency for _, item := range d.Dependencies { dep := item diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go index 44e2302cc4..a1ce927f84 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/configmap.go @@ -6,7 +6,7 @@ import ( "strings" "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -47,7 +47,7 @@ func NewSQLLoaderForConfigMapData(logger *logrus.Entry, store registry.Load, con } } -func NewSQLLoaderForConfigMap(store registry.Load, configMap v1.ConfigMap) *ConfigMapLoader { +func NewSQLLoaderForConfigMap(store registry.Load, configMap corev1.ConfigMap) *ConfigMapLoader { logger := logrus.WithFields(logrus.Fields{"configmap": configMap.GetName(), "ns": configMap.GetNamespace()}) return &ConfigMapLoader{ log: logger, @@ -66,14 +66,14 @@ func (c *ConfigMapLoader) Populate() error { return fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCRDName) } - crdListJson, err := yaml.YAMLToJSON([]byte(crdListYaml)) + crdListJSON, err := yaml.YAMLToJSON([]byte(crdListYaml)) if err != nil { c.log.WithError(err).Debug("error loading CRD list") return err } var parsedCRDList []v1beta1.CustomResourceDefinition - if err := json.Unmarshal(crdListJson, &parsedCRDList); err != nil { + if err := json.Unmarshal(crdListJSON, &parsedCRDList); err != nil { c.log.WithError(err).Debug("error parsing CRD list") return err } @@ -106,14 +106,14 @@ func (c *ConfigMapLoader) Populate() error { errs = append(errs, fmt.Errorf("couldn't find expected key %s in configmap", ConfigMapCSVName)) return utilerrors.NewAggregate(errs) } - csvListJson, err := yaml.YAMLToJSON([]byte(csvListYaml)) + csvListJSON, err := yaml.YAMLToJSON([]byte(csvListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading CSV list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedCSVList []registry.ClusterServiceVersion - err = 
json.Unmarshal(csvListJson, &parsedCSVList) + err = json.Unmarshal(csvListJSON, &parsedCSVList) if err != nil { errs = append(errs, fmt.Errorf("error parsing CSV list: %s", err)) return utilerrors.NewAggregate(errs) @@ -164,14 +164,14 @@ func (c *ConfigMapLoader) Populate() error { return utilerrors.NewAggregate(errs) } - packageListJson, err := yaml.YAMLToJSON([]byte(packageListYaml)) + packageListJSON, err := yaml.YAMLToJSON([]byte(packageListYaml)) if err != nil { errs = append(errs, fmt.Errorf("error loading package list: %s", err)) return utilerrors.NewAggregate(errs) } var parsedPackageManifests []registry.PackageManifest - err = json.Unmarshal(packageListJson, &parsedPackageManifests) + err = json.Unmarshal(packageListJSON, &parsedPackageManifests) if err != nil { errs = append(errs, fmt.Errorf("error parsing package list: %s", err)) return utilerrors.NewAggregate(errs) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go index ff1da4c48e..47d2257f74 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/conversion.go @@ -7,9 +7,10 @@ import ( "fmt" "strings" - "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/sirupsen/logrus" + "github.com/operator-framework/api/pkg/operators/v1alpha1" + "github.com/operator-framework/operator-registry/alpha/model" "github.com/operator-framework/operator-registry/pkg/api" "github.com/operator-framework/operator-registry/pkg/registry" @@ -39,6 +40,7 @@ func initializeModelPackages(ctx context.Context, q *SQLQuerier) (model.Model, e return nil, err } + // nolint:prealloc var rPkgs []registry.PackageManifest for _, pkgName := range pkgNames { rPkg, err := q.GetPackage(ctx, pkgName) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go index e09bfbc036..5d43615f1d 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/db_options.go @@ -4,12 +4,14 @@ import ( "database/sql" ) +// nolint:stylecheck type DbOptions struct { // MigratorBuilder is a function that returns a migrator instance MigratorBuilder func(*sql.DB) (Migrator, error) EnableAlpha bool } +// nolint:stylecheck type DbOption func(*DbOptions) func defaultDBOptions() *DbOptions { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go index 4ac3d61ebf..80e11fc916 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecate.go @@ -72,6 +72,7 @@ func (d *PackageDeprecator) MaybeRemovePackages() error { var errs []error var removedBundlePaths []string + // nolint:prealloc var remainingBundlePaths []string // Iterate over bundles list - see if any bundle is the head of a default channel in a package diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go index 20a1389b71..a0b4bc75f6 100644 --- 
a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/deprecationmessage.go @@ -10,7 +10,7 @@ const noticeColor = "\033[1;33m%s\033[0m" func LogSqliteDeprecation() { log := logrus.New() - log.Warnf(DeprecationMessage) + log.Warn(DeprecationMessage) } var DeprecationMessage = fmt.Sprintf(noticeColor, `DEPRECATION NOTICE: diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go index 2ed0c595ef..a334ff6936 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/directory.go @@ -54,7 +54,9 @@ func (d *DirectoryLoader) Populate() error { // collectWalkErrs calls the given walk func and appends any non-nil, non skip dir error returned to the given errors slice. func collectWalkErrs(walk filepath.WalkFunc, errs *[]error) filepath.WalkFunc { - return func(path string, f os.FileInfo, err error) (walkErr error) { + return func(path string, f os.FileInfo, err error) error { + var walkErr error + // nolint: errorlint if walkErr = walk(path, f, err); walkErr != nil && walkErr != filepath.SkipDir { *errs = append(*errs, walkErr) return nil @@ -67,7 +69,7 @@ func collectWalkErrs(walk filepath.WalkFunc, errs *[]error) filepath.WalkFunc { // LoadBundleWalkFunc walks the directory. When it sees a `.clusterserviceversion.yaml` file, it // attempts to load the surrounding files in the same directory as a bundle, and stores them in the // db for querying -func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, err error) error { +func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, _ error) error { if f == nil { return fmt.Errorf("invalid file: %v", f) } @@ -131,7 +133,7 @@ func (d *DirectoryLoader) LoadBundleWalkFunc(path string, f os.FileInfo, err err // LoadPackagesWalkFunc attempts to unmarshal the file at the given path into a PackageManifest resource. // If unmarshaling is successful, the PackageManifest is added to the loader's store. -func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, err error) error { +func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, _ error) error { if f == nil { return fmt.Errorf("invalid file: %v", f) } @@ -163,7 +165,6 @@ func (d *DirectoryLoader) LoadPackagesWalkFunc(path string, f os.FileInfo, err e if err != nil { return fmt.Errorf("could not decode contents of file %s into package: %s", path, err) } - } if manifest.PackageName == "" { return nil diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go index f8a5a13501..9592b5f54b 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/load.go @@ -69,7 +69,7 @@ func (s *sqlLoader) AddOperatorBundle(bundle *registry.Bundle) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -123,6 +123,7 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } if substitutesFor != "" && !s.enableAlpha { + // nolint:stylecheck return fmt.Errorf("SubstitutesFor is an alpha-only feature. 
You must enable alpha features with the flag --enable-alpha in order to use this feature.") } @@ -162,7 +163,6 @@ func (s *sqlLoader) addOperatorBundle(tx *sql.Tx, bundle *registry.Bundle) error } func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error { - updateBundleReplaces, err := tx.Prepare("update operatorbundle set replaces = ? where replaces = ?") if err != nil { return err @@ -205,6 +205,7 @@ func (s *sqlLoader) addSubstitutesFor(tx *sql.Tx, bundle *registry.Bundle) error if err != nil { return fmt.Errorf("failed to obtain substitutes : %s", err) } + // nolint:nestif if substitutesFor != "" { // Update any replaces that reference the substituted-for bundle _, err = updateBundleReplaces.Exec(csvName, substitutesFor) @@ -407,7 +408,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() var errs []error @@ -507,6 +508,7 @@ func (s *sqlLoader) AddPackageChannelsFromGraph(graph *registry.Package) error { // If the number of nodes is 5 and the startDepth is 3, the expected depth is 7 (3, 4, 5, 6, 7) expectedDepth := len(channel.Nodes) + startDepth - 1 if expectedDepth != depth { + // nolint:stylecheck err := fmt.Errorf("Invalid graph: some (non-bottom) nodes defined in the graph were not mentioned as replacements of any node (%d != %d)", expectedDepth, depth) errs = append(errs, err) } @@ -533,7 +535,7 @@ func (s *sqlLoader) AddPackageChannels(manifest registry.PackageManifest) error return fmt.Errorf("unable to start a transaction: %s", err) } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmPackage(tx, manifest.PackageName); err != nil { @@ -591,6 +593,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani return fmt.Errorf("failed to add package %q: %s", manifest.PackageName, err.Error()) } + // nolint:prealloc var ( errs []error channels []registry.PackageChannel @@ -717,6 +720,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani // If we find 'replaces' in the circuit list then we've seen it already, break out if _, ok := replaceCycle[replaces]; ok { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Cycle detected, %s replaces %s", channelEntryCSVName, replaces)) break } @@ -732,6 +736,7 @@ func (s *sqlLoader) addPackageChannels(tx *sql.Tx, manifest registry.PackageMani break } if _, _, _, err := s.getBundleSkipsReplacesVersion(tx, replaces); err != nil { + // nolint:stylecheck errs = append(errs, fmt.Errorf("Invalid bundle %s, replaces nonexistent bundle %s", c.CurrentCSVName, replaces)) break } @@ -750,7 +755,7 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() removeNonHeadBundles, err := tx.Prepare(` @@ -773,34 +778,37 @@ func (s *sqlLoader) ClearNonHeadBundles() error { return tx.Commit() } -func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (replaces string, skips []string, version string, err error) { +func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) (string, []string, string, error) { getReplacesSkipsAndVersions, err := tx.Prepare(` SELECT replaces, skips, version FROM operatorbundle WHERE operatorbundle.name=? 
LIMIT 1`) if err != nil { - return + return "", nil, "", err } defer getReplacesSkipsAndVersions.Close() rows, rerr := getReplacesSkipsAndVersions.Query(bundleName) if err != nil { err = rerr - return + return "", nil, "", err } defer rows.Close() if !rows.Next() { err = fmt.Errorf("no bundle found for bundlename %s", bundleName) - return + return "", nil, "", err } var replacesStringSQL sql.NullString var skipsStringSQL sql.NullString var versionStringSQL sql.NullString if err = rows.Scan(&replacesStringSQL, &skipsStringSQL, &versionStringSQL); err != nil { - return + return "", nil, "", err } + var replaces string + var skips []string + var version string if replacesStringSQL.Valid { replaces = replacesStringSQL.String } @@ -811,40 +819,41 @@ func (s *sqlLoader) getBundleSkipsReplacesVersion(tx *sql.Tx, bundleName string) version = versionStringSQL.String } - return + return replaces, skips, version, nil } -func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (bundlePath string, err error) { +func (s *sqlLoader) getBundlePathIfExists(tx *sql.Tx, bundleName string) (string, error) { getBundlePath, err := tx.Prepare(` SELECT bundlepath FROM operatorbundle WHERE operatorbundle.name=? LIMIT 1`) if err != nil { - return + return "", err } defer getBundlePath.Close() rows, rerr := getBundlePath.Query(bundleName) if err != nil { err = rerr - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set - return + return "", nil } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } func (s *sqlLoader) addAPIs(tx *sql.Tx, bundle *registry.Bundle) error { @@ -950,7 +959,7 @@ func (s *sqlLoader) RemovePackage(packageName string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() csvNames, err := s.getCSVNames(tx, packageName) @@ -1059,7 +1068,7 @@ func (s *sqlLoader) AddBundlePackageChannels(manifest registry.PackageManifest, return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.addOperatorBundle(tx, bundle); err != nil { @@ -1343,7 +1352,7 @@ type tailBundle struct { replacedBy []string // to handle any chain where a skipped entry may be a part of another channel that should not be truncated } -func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, err error) { +func getTailFromBundle(tx *sql.Tx, head string) (map[string]tailBundle, error) { // traverse replaces chain and collect channel list for each bundle. // This assumes that replaces chain for a bundle is the same across channels. // only real bundles with entries in the operator_bundle table are returned. 
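The two refactors that dominate the load.go hunks above — deferring `_ = tx.Rollback()` and replacing naked returns with explicit return values — follow one pattern. A minimal sketch of that pattern, not code from this patch (the `items` table and the `getName`/`update` helpers are hypothetical):

```go
package sqlutil

import (
	"database/sql"
	"fmt"
)

// getName returns explicit values instead of naked returns, mirroring the
// shape of the refactored getBundleSkipsReplacesVersion above.
func getName(tx *sql.Tx, id int64) (string, error) {
	row := tx.QueryRow(`SELECT name FROM items WHERE id = ?`, id)
	var name sql.NullString
	if err := row.Scan(&name); err != nil {
		return "", err
	}
	if !name.Valid {
		return "", fmt.Errorf("no name for id %d", id)
	}
	return name.String, nil
}

func update(db *sql.DB, id int64) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	// Rollback after a successful Commit is a harmless no-op (sql.ErrTxDone);
	// discarding its error with `_ =` is what satisfies errcheck, which is
	// the change applied throughout this file.
	defer func() { _ = tx.Rollback() }()

	if _, err := getName(tx, id); err != nil {
		return err
	}
	return tx.Commit()
}
```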
@@ -1392,7 +1401,7 @@ func getTailFromBundle(tx *sql.Tx, head string) (bundles map[string]tailBundle, return nil, fmt.Errorf("could not find default channel head for %s", head) } var defaultChannelHead sql.NullString - err = row.Scan(&defaultChannelHead) + err := row.Scan(&defaultChannelHead) if err != nil { return nil, fmt.Errorf("error getting default channel head for %s: %v", head, err) } @@ -1481,7 +1490,7 @@ func (s *sqlLoader) DeprecateBundle(path string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() name, version, err := getBundleNameAndVersionForImage(tx, path) @@ -1550,7 +1559,6 @@ deprecate: if err := s.rmBundle(tx, bundle); err != nil { return err } - } // remove links to deprecated/truncated bundles to avoid regenerating these on add/overwrite _, err = tx.Exec(`UPDATE channel_entry SET replaces=NULL WHERE operatorbundle_name=?`, name) @@ -1592,7 +1600,7 @@ func (s *sqlLoader) RemoveStrandedBundles() error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() if err := s.rmStrandedBundles(tx); err != nil { @@ -1742,7 +1750,7 @@ func (d *DeprecationAwareLoader) clearLastDeprecatedInPackage(pkg string) error return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // The last deprecated bundles for a package will still have "tombstone" records in channel_entry (among other tables). @@ -1770,7 +1778,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } defer func() { - tx.Rollback() + _ = tx.Rollback() }() // check if bundle has anything that replaces it getBundlesThatReplaceHeadQuery := `SELECT DISTINCT operatorbundle.name AS replaces, channel_entry.channel_name @@ -1795,6 +1803,7 @@ func (s sqlLoader) RemoveOverwrittenChannelHead(pkg, bundle string) error { return err } // This is not a head bundle for all channels it is a member of. Cannot remove + // nolint: staticcheck return fmt.Errorf("cannot overwrite bundle %s from package %s: replaced by %s on channel %s", bundle, pkg, replaces.String, channel.String) } } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go index 0196064d64..218f2cda13 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/loadprocs.go @@ -41,6 +41,7 @@ func addReplaces(tx *sql.Tx, replacesID, entryID int64) error { return nil } +// nolint:unused func addPackage(tx *sql.Tx, packageName string) error { addPackage, err := tx.Prepare("insert into package(name) values(?)") if err != nil { @@ -71,6 +72,7 @@ func addPackageIfNotExists(tx *sql.Tx, packageName string) error { return nil } +// nolint:unused func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { addChannel, err := tx.Prepare("insert into channel(name, package_name, head_operatorbundle_name) values(?, ?, ?)") if err != nil { @@ -86,6 +88,7 @@ func addChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error return nil } +// nolint:unused func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) error { updateChannel, err := tx.Prepare("update channel set head_operatorbundle_name = ? where name = ? 
and package_name = ?") if err != nil { @@ -96,7 +99,6 @@ func updateChannel(tx *sql.Tx, channelName, packageName, headCsvName string) err _, err = updateChannel.Exec(channelName, packageName, headCsvName) if err != nil { return fmt.Errorf("failed to update channel (%s) for package (%s) with head (%s) : %s", channelName, packageName, headCsvName, err) - } return nil diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go index 3b3c8c36b9..e4511bfb22 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/001_related_images.go @@ -45,25 +45,25 @@ func getCSV(ctx context.Context, tx *sql.Tx, name string) (*registry.ClusterServ return nil, err } - var csvJson sql.NullString + var csvJSON sql.NullString if !rows.Next() { return nil, fmt.Errorf("bundle %s not found", name) } - if err := rows.Scan(&csvJson); err != nil { + if err := rows.Scan(&csvJSON); err != nil { return nil, err } - if !csvJson.Valid { + if !csvJSON.Valid { return nil, fmt.Errorf("bad value for csv") } csv := ®istry.ClusterServiceVersion{} - if err := json.Unmarshal([]byte(csvJson.String), csv); err != nil { + if err := json.Unmarshal([]byte(csvJSON.String), csv); err != nil { return nil, err } return csv, nil } func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into related_image(image, operatorbundle_name) values(?,?)` + addSQL := `insert into related_image(image, operatorbundle_name) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling related images: %v", err) @@ -83,7 +83,7 @@ func extractRelatedImages(ctx context.Context, tx *sql.Tx, name string) error { images[k] = struct{}{} } for img := range images { - if _, err := tx.ExecContext(ctx, addSql, img, name); err != nil { + if _, err := tx.ExecContext(ctx, addSQL, img, name); err != nil { logrus.Warnf("error backfilling related images: %v", err) continue } @@ -101,7 +101,7 @@ var relatedImagesMigration = &Migration{ FOREIGN KEY(operatorbundle_name) REFERENCES operatorbundle(name) ); ` - _, err := tx.ExecContext(ctx, sql) + _, _ = tx.ExecContext(ctx, sql) bundles, err := listBundles(ctx, tx) if err != nil { diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go index 0253c5119b..f25d285ab7 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/003_required_apis.go @@ -9,14 +9,15 @@ import ( "github.com/sirupsen/logrus" ) +// nolint:stylecheck const RequiredApiMigrationKey = 3 // Register this migration func init() { - registerMigration(RequiredApiMigrationKey, requiredApiMigration) + registerMigration(RequiredApiMigrationKey, requiredAPIMigration) } -var requiredApiMigration = &Migration{ +var requiredAPIMigration = &Migration{ Id: RequiredApiMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { sql := ` @@ -37,8 +38,8 @@ var requiredApiMigration = &Migration{ if err != nil { return err } - for entryId, bundle := range bundles { - if err := extractRequiredApis(ctx, tx, entryId, bundle); err != 
nil { + for entryID, bundle := range bundles { + if err := extractRequiredApis(ctx, tx, entryID, bundle); err != nil { logrus.Warnf("error backfilling required apis: %v", err) continue } @@ -67,20 +68,20 @@ func getChannelEntryBundles(ctx context.Context, tx *sql.Tx) (map[int64]string, entries := map[int64]string{} for rows.Next() { - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString - if err = rows.Scan(&entryId, &name); err != nil { + if err = rows.Scan(&entryID, &name); err != nil { return nil, err } - if !entryId.Valid || !name.Valid { + if !entryID.Valid || !name.Valid { continue } - entries[entryId.Int64] = name.String + entries[entryID.Int64] = name.String } return entries, nil } -func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name string) error { +func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryID int64, name string) error { addAPI, err := tx.Prepare("insert or replace into api(group_name, version, kind, plural) values(?, ?, ?, ?)") if err != nil { return err @@ -91,12 +92,12 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st } }() - addApiRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") + addAPIRequirer, err := tx.Prepare("insert into api_requirer(group_name, version, kind, channel_entry_id) values(?, ?, ?, ?)") if err != nil { return err } defer func() { - if err := addApiRequirer.Close(); err != nil { + if err := addAPIRequirer.Close(); err != nil { logrus.WithError(err).Warningf("error closing prepared statement") } }() @@ -107,7 +108,7 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return err } - _, requiredCRDs, err := csv.GetCustomResourceDefintions() + _, requiredCRDs, _ := csv.GetCustomResourceDefintions() for _, crd := range requiredCRDs { plural, group, err := SplitCRDName(crd.Name) if err != nil { @@ -116,17 +117,17 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st if _, err := addAPI.Exec(group, crd.Version, crd.Kind, plural); err != nil { return err } - if _, err := addApiRequirer.Exec(group, crd.Version, crd.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(group, crd.Version, crd.Kind, entryID); err != nil { return err } } - _, requiredAPIs, err := csv.GetApiServiceDefinitions() + _, requiredAPIs, _ := csv.GetApiServiceDefinitions() for _, api := range requiredAPIs { if _, err := addAPI.Exec(api.Group, api.Version, api.Kind, api.Name); err != nil { return err } - if _, err := addApiRequirer.Exec(api.Group, api.Version, api.Kind, entryId); err != nil { + if _, err := addAPIRequirer.Exec(api.Group, api.Version, api.Kind, entryID); err != nil { return err } } @@ -134,14 +135,13 @@ func extractRequiredApis(ctx context.Context, tx *sql.Tx, entryId int64, name st return nil } -func SplitCRDName(crdName string) (plural, group string, err error) { +func SplitCRDName(crdName string) (string, string, error) { + var err error pluralGroup := strings.SplitN(crdName, ".", 2) if len(pluralGroup) != 2 { err = fmt.Errorf("can't split bad CRD name %s", crdName) - return + return "", "", err } - plural = pluralGroup[0] - group = pluralGroup[1] - return + return pluralGroup[0], pluralGroup[1], nil } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go index 
60b3c87ada..6a825debc3 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/005_version_skiprange.go @@ -75,7 +75,7 @@ var versionSkipRangeMigration = &Migration{ } func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { - addSql := `insert into operatorbundle(version, skiprange) values(?,?)` + addSQL := `insert into operatorbundle(version, skiprange) values(?,?)` csv, err := getCSV(ctx, tx, name) if err != nil { logrus.Warnf("error backfilling versioning: %v", err) @@ -89,6 +89,6 @@ func extractVersioning(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { version = "" } - _, err = tx.ExecContext(ctx, addSql, version, skiprange) + _, err = tx.ExecContext(ctx, addSQL, version, skiprange) return err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go index f70436f1d2..0e57e67fc3 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/006_associate_apis_with_bundle.go @@ -11,7 +11,7 @@ const AssociateApisWithBundleMigrationKey = 6 // Register this migration func init() { - registerMigration(AssociateApisWithBundleMigrationKey, bundleApiMigration) + registerMigration(AssociateApisWithBundleMigrationKey, bundleAPIMigration) } // This migration moves the link between the provided and required apis table from the channel_entry to the @@ -24,7 +24,7 @@ func init() { // api_provider: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), // api_requirer: FOREIGN KEY(operatorbundle_name, operatorbundle_version, operatorbundle_path) REFERENCES operatorbundle(name, version, bundlepath), -var bundleApiMigration = &Migration{ +var bundleAPIMigration = &Migration{ Id: AssociateApisWithBundleMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { createNew := ` diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go index 7825e89fe9..2340634bee 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/007_replaces_skips.go @@ -97,12 +97,12 @@ func extractReplaces(ctx context.Context, tx *sql.Tx, name string) error { if err != nil { return err } - updateSql := `update operatorbundle SET replaces = ?, skips = ? WHERE name = ?;` - _, err = tx.ExecContext(ctx, updateSql, replaces, strings.Join(skips, ","), name) + updateSQL := `update operatorbundle SET replaces = ?, skips = ? 
WHERE name = ?;` + _, err = tx.ExecContext(ctx, updateSQL, replaces, strings.Join(skips, ","), name) return err } -func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces string, skips []string, err error) { +func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (string, []string, error) { getReplacees := ` SELECT DISTINCT replaces.operatorbundle_name FROM channel_entry @@ -117,26 +117,28 @@ func getReplacesAndSkips(ctx context.Context, tx *sql.Tx, name string) (replaces } defer rows.Close() + var replaces string if rows.Next() { var replaceeName sql.NullString if err = rows.Scan(&replaceeName); err != nil { - return + return "", nil, err } if replaceeName.Valid { replaces = replaceeName.String } } + var skips []string skips = []string{} for rows.Next() { var skipName sql.NullString if err = rows.Scan(&skipName); err != nil { - return + return "", nil, err } if !skipName.Valid { continue } skips = append(skips, skipName.String) } - return + return replaces, skips, nil } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go index 0466756116..252ad99ecc 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/009_properties.go @@ -75,12 +75,12 @@ var propertiesMigration = &Migration{ } // update the serialized value to omit the dependency type - updateDependencySql := ` + updateDependencySQL := ` UPDATE dependencies SET value = (SELECT json_remove(value, "$.type") FROM dependencies WHERE operatorbundle_name=dependencies.operatorbundle_name)` - _, err = tx.ExecContext(ctx, updateDependencySql) + _, err = tx.ExecContext(ctx, updateDependencySQL) if err != nil { return err } @@ -111,6 +111,7 @@ func getPackageForBundle(ctx context.Context, name string, tx *sql.Tx) (string, if !pkg.Valid { return "", err } + // nolint: staticcheck return pkg.String, nil } return "", err diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go index bee961621c..d488775b0f 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/010_set_bundlepath_pkg_property.go @@ -15,12 +15,12 @@ func init() { var bundlePathPkgPropertyMigration = &Migration{ Id: BundlePathPkgMigrationKey, Up: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = (SELECT bundlepath FROM operatorbundle WHERE operatorbundle_name = operatorbundle.name AND operatorbundle_version = operatorbundle.version)` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != nil { return err } @@ -28,11 +28,11 @@ var bundlePathPkgPropertyMigration = &Migration{ return nil }, Down: func(ctx context.Context, tx *sql.Tx) error { - updatePropertiesSql := ` + updatePropertiesSQL := ` UPDATE properties SET operatorbundle_path = null WHERE type = "olm.package"` - _, err := tx.ExecContext(ctx, updatePropertiesSql) + _, err := tx.ExecContext(ctx, updatePropertiesSQL) if err != 
nil { return err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go index 760b381ff7..e99480d581 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/012_deprecated.go @@ -29,6 +29,7 @@ var deprecatedMigration = &Migration{ return err } + // nolint: gosec initDeprecated := fmt.Sprintf(`INSERT OR REPLACE INTO deprecated(operatorbundle_name) SELECT operatorbundle_name FROM properties WHERE properties.type='%s'`, registry.DeprecatedType) _, err := tx.ExecContext(ctx, initDeprecated) diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go index b9bb60fbaf..475bb7cd69 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrations/migrations.go @@ -8,6 +8,7 @@ import ( ) type Migration struct { + // nolint:stylecheck Id int Up func(context.Context, *sql.Tx) error Down func(context.Context, *sql.Tx) error diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go index 82bacc834c..9f1438ab53 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/migrator.go @@ -3,6 +3,7 @@ package sqlite import ( "context" "database/sql" + "errors" "fmt" "strings" @@ -86,12 +87,12 @@ func (m *SQLLiteMigrator) Up(ctx context.Context, migrations migrations.Migratio } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version+1 { + if migration.Id != currentVersion+1 { return fmt.Errorf("migration applied out of order") } @@ -127,12 +128,12 @@ func (m *SQLLiteMigrator) Down(ctx context.Context, migrations migrations.Migrat } for _, migration := range migrations { - current_version, err := m.version(ctx, tx) + currentVersion, err := m.version(ctx, tx) if err != nil { return err } - if migration.Id != current_version { + if migration.Id != currentVersion { return fmt.Errorf("migration applied out of order") } @@ -175,7 +176,7 @@ func (m *SQLLiteMigrator) tableExists(tx *sql.Tx, table string) (bool, error) { return exists, nil } -func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, err error) { +func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (int, error) { tableExists, err := m.tableExists(tx, m.migrationsTable) if err != nil { return NilVersion, err @@ -185,9 +186,10 @@ func (m *SQLLiteMigrator) version(ctx context.Context, tx *sql.Tx) (version int, } query := `SELECT version FROM ` + m.migrationsTable + ` LIMIT 1` + var version int err = tx.QueryRowContext(ctx, query).Scan(&version) switch { - case err == sql.ErrNoRows: + case errors.Is(err, sql.ErrNoRows): return NilVersion, nil case err != nil: return NilVersion, err @@ -200,10 +202,12 @@ func (m *SQLLiteMigrator) setVersion(ctx context.Context, tx *sql.Tx, version in if err := m.ensureMigrationTable(ctx, tx); err != nil 
{ return err } + // nolint: gosec _, err := tx.ExecContext(ctx, "DELETE FROM "+m.migrationsTable) if err != nil { return err } + // nolint: gosec _, err = tx.ExecContext(ctx, "INSERT INTO "+m.migrationsTable+"(version) values(?)", version) return err } diff --git a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go index 24880f1fca..7a42981f45 100644 --- a/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go +++ b/vendor/github.com/operator-framework/operator-registry/pkg/sqlite/query.go @@ -63,6 +63,7 @@ func NewSQLLiteQuerier(dbFilename string, opts ...SQLiteQuerierOption) (*SQLQuer return NewSQLLiteQuerierFromDb(db, opts...), nil } +// nolint:stylecheck func NewSQLLiteQuerierFromDb(db *sql.DB, opts ...SQLiteQuerierOption) *SQLQuerier { return NewSQLLiteQuerierFromDBQuerier(dbQuerierAdapter{db}, opts...) } @@ -241,13 +242,13 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam if !rows.Next() { return nil, fmt.Errorf("no entry found for %s %s %s", pkgName, channelName, csvName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var name sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &name, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -265,7 +266,7 @@ func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvNam out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -315,18 +316,18 @@ WHERE channel.name = :channel AND channel.package_name = :package` }, nil } -func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name FROM channel_entry LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id WHERE replaces.operatorbundle_name = ?` rows, err := s.db.QueryContext(ctx, query, name) if err != nil { - return + return nil, err } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString @@ -334,7 +335,7 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri var bundleNameSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL); err != nil { - return + return nil, err } entries = append(entries, ®istry.ChannelEntry{ PackageName: pkgNameSQL.String, @@ -345,9 +346,9 @@ func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name stri } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that replace %s", name) - return + return nil, err } - return + return entries, nil } func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, channelName string) (*api.Bundle, error) { @@ -365,13 +366,13 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c if !rows.Next() 
{ return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var outName sql.NullString var bundle sql.NullString var bundlePath sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil { return nil, err } @@ -389,7 +390,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -411,7 +412,7 @@ func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, c return out, nil } -func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*registry.ChannelEntry, error) { // TODO: join on full fk, not just operatorbundlename query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name FROM channel_entry @@ -433,7 +434,7 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString @@ -441,7 +442,7 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve var bundleNameSQL sql.NullString var replacesSQL sql.NullString if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL); err != nil { - return + return nil, err } entries = append(entries, ®istry.ChannelEntry{ @@ -453,13 +454,13 @@ func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, ve } if len(entries) == 0 { err = fmt.Errorf("no channel entries found that provide %s %s %s", group, version, kind) - return + return nil, err } - return + return entries, nil } // Get latest channel entries that provide an api -func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) { +func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) ([]*registry.ChannelEntry, error) { query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name, MIN(channel_entry.depth) FROM channel_entry INNER JOIN properties ON channel_entry.operatorbundle_name = properties.operatorbundle_name @@ -482,15 +483,15 @@ func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, gro } defer rows.Close() - entries = []*registry.ChannelEntry{} + var entries []*registry.ChannelEntry for rows.Next() { var pkgNameSQL sql.NullString var channelNameSQL sql.NullString var bundleNameSQL sql.NullString var replacesSQL sql.NullString - var min_depth sql.NullInt64 - if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &min_depth); err != nil { + var minDepth sql.NullInt64 + if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &minDepth); err != nil 
{ return nil, err } @@ -518,7 +519,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio WHERE properties.type = ? AND properties.value = ? AND package.default_channel = channel_entry.channel_name GROUP BY channel_entry.package_name, channel_entry.channel_name` - value, err := json.Marshal(map[string]string{ + value, _ := json.Marshal(map[string]string{ "group": group, "version": apiVersion, "kind": kind, @@ -532,17 +533,17 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio if !rows.Next() { return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind) } - var entryId sql.NullInt64 + var entryID sql.NullInt64 var bundle sql.NullString var bundlePath sql.NullString - var min_depth sql.NullInt64 + var minDepth sql.NullInt64 var bundleName sql.NullString var pkgName sql.NullString var channelName sql.NullString var replaces sql.NullString var version sql.NullString var skipRange sql.NullString - if err := rows.Scan(&entryId, &bundle, &bundlePath, &min_depth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { + if err := rows.Scan(&entryID, &bundle, &bundlePath, &minDepth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil { return nil, err } @@ -564,7 +565,7 @@ func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersio out.Version = version.String out.SkipRange = skipRange.String - provided, required, err := s.GetApisForEntry(ctx, entryId.Int64) + provided, required, err := s.GetApisForEntry(ctx, entryID.Int64) if err != nil { return nil, err } @@ -627,7 +628,7 @@ func (s *SQLQuerier) GetImagesForBundle(ctx context.Context, csvName string) ([] return images, nil } -func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) { +func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) ([]*api.GroupVersionKind, []*api.GroupVersionKind, error) { groups := map[string]struct{}{} kinds := map[string]struct{}{} versions := map[string]struct{}{} @@ -642,7 +643,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer providedRows.Close() - provided = []*api.GroupVersionKind{} + var provided []*api.GroupVersionKind for providedRows.Next() { var value sql.NullString @@ -678,7 +679,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } defer requiredRows.Close() - required = []*api.GroupVersionKind{} + var required []*api.GroupVersionKind for requiredRows.Next() { var value sql.NullString @@ -770,7 +771,7 @@ func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provid } required[i].Plural = plural } - return + return provided, required, nil } func (s *SQLQuerier) GetBundleVersion(ctx context.Context, image string) (string, error) { @@ -809,6 +810,7 @@ func (s *SQLQuerier) GetBundlePathsForPackage(ctx context.Context, pkgName strin return nil, err } if imgName.Valid && imgName.String == "" { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find paths to bundle images") } images = append(images, imgName.String) @@ -844,6 +846,7 @@ func (s *SQLQuerier) GetBundlesForPackage(ctx context.Context, pkgName string) ( key.Version = version.String } if key.IsEmpty() { + // nolint: stylecheck return nil, fmt.Errorf("Index malformed: cannot find identifier for bundle in package %s", pkgName) } bundles[key] = struct{}{} @@ 
-1047,7 +1050,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) + _ = buildLegacyRequiredAPIs(out.Dependencies, &out.RequiredApis) out.Dependencies = uniqueDeps(out.Dependencies) if props.Valid { @@ -1055,7 +1058,7 @@ func (s *SQLQuerier) SendBundles(ctx context.Context, stream registry.BundleSend return err } } - buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) + _ = buildLegacyProvidedAPIs(out.Properties, &out.ProvidedApis) out.Properties = uniqueProps(out.Properties) if err := stream.Send(out); err != nil { return err @@ -1079,7 +1082,6 @@ func (s *SQLQuerier) ListBundles(ctx context.Context) ([]*api.Bundle, error) { return nil, err } return bundleSender, nil - } func buildLegacyRequiredAPIs(src []*api.Dependency, dst *[]*api.GroupVersionKind) error { @@ -1150,7 +1152,7 @@ func uniqueProps(props []*api.Property) []*api.Property { return list } -func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) { +func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) ([]*api.Dependency, error) { depQuery := `SELECT DISTINCT type, value FROM dependencies WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1162,7 +1164,7 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version } defer rows.Close() - dependencies = []*api.Dependency{} + var dependencies []*api.Dependency for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1179,10 +1181,10 @@ func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version }) } - return + return dependencies, nil } -func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) (properties []*api.Property, err error) { +func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) ([]*api.Property, error) { propQuery := `SELECT DISTINCT type, value FROM properties WHERE operatorbundle_name=? AND (operatorbundle_version=? OR operatorbundle_version is NULL) @@ -1194,7 +1196,7 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, } defer rows.Close() - properties = []*api.Property{} + var properties []*api.Property for rows.Next() { var typeName sql.NullString var value sql.NullString @@ -1211,10 +1213,10 @@ func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, }) } - return + return properties, nil } -func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (bundlePath string, err error) { +func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName string) (string, error) { getBundlePathQuery := ` SELECT bundlepath FROM operatorbundle @@ -1222,26 +1224,27 @@ func (s *SQLQuerier) GetBundlePathIfExists(ctx context.Context, bundleName strin rows, err := s.db.QueryContext(ctx, getBundlePathQuery, bundleName) if err != nil { - return + return "", err } defer rows.Close() if !rows.Next() { // no bundlepath set err = registry.ErrBundleImageNotInDatabase - return + return "", err } var bundlePathSQL sql.NullString if err = rows.Scan(&bundlePathSQL); err != nil { - return + return "", err } + var bundlePath string if bundlePathSQL.Valid { bundlePath = bundlePathSQL.String } - return + return bundlePath, nil } // ListRegistryBundles returns a set of registry bundles. 
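A pattern repeated through the query.go hunks above is replacing an eagerly allocated empty slice (`entries = []*registry.ChannelEntry{}`) with a nil declaration (`var entries []*registry.ChannelEntry`) to satisfy the prealloc linter. The two forms behave identically under `append` and `len`; the one visible difference is serialization. A stand-alone illustration using plain `[]string` rather than the registry types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var a []string  // nil slice, as the refactored code declares
	b := []string{} // empty but non-nil, as the old code declared

	// append and len work identically on both.
	a = append(a, "x")
	b = append(b, "x")
	fmt.Println(a, b, len(a), len(b)) // [x] [x] 1 1

	// The visible difference: encoding a nil slice yields null, not [].
	var c []string
	d := []string{}
	jc, _ := json.Marshal(c)
	jd, _ := json.Marshal(d)
	fmt.Println(string(jc), string(jd)) // null []
}
```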
diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 9fa948ebf9..ed4d259db2 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -6,5 +6,7 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt +.DS_Store diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index 013173af5e..d8c40e539c 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.22.6 +1.23.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 2140779741..f5a6703a0b 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . -name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -13,9 +14,26 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -24,21 +42,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic BOLT_CMD=bbolt @@ -55,7 +75,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . 
.PHONY: install-gofail @@ -65,12 +85,24 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - -.PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the prow infrastructure +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. +test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/vendor/go.etcd.io/bbolt/OWNERS b/vendor/go.etcd.io/bbolt/OWNERS new file mode 100644 index 0000000000..91f168a798 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 495a93ef8f..f365e51e3e 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -1,10 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) @@ -71,13 +69,14 @@ New minor versions may add additional features to the API. 
- [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started ### Installing -To start using Bolt, install Go and run `go get`: +To start using `bbolt`, install Go and run `go get`: ```sh $ go get go.etcd.io/bbolt@latest ``` @@ -103,7 +102,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -298,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return errors.New("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can @@ -305,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. +You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -336,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func (tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answer` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. If you need to use a value outside of the transaction @@ -654,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } @@ -890,7 +921,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. @@ -919,6 +950,21 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request.
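The key/value section above distinguishes a missing key (`Get` returns `nil`) from a key stored with a zero-length value, and recommends calling `Tx.CreateBucketIfNotExists()` for top-level buckets right after opening the database, but neither point has a companion snippet in the diff. A minimal sketch combining both follows; the bucket and key names are illustrative, and the empty-value read-back behavior follows the Known Issues note below:

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		// Common pattern: ensure top-level buckets exist right after opening.
		b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
		if err != nil {
			return err
		}
		// Store a key whose value is intentionally zero-length.
		return b.Put([]byte("marker"), []byte{})
	}); err != nil {
		log.Fatal(err)
	}

	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("MyBucket"))
		// A nil result means the key is absent; a zero-length value reads
		// back as an empty (non-nil) slice, per the Known Issues note.
		if v := b.Get([]byte("marker")); v == nil {
			fmt.Println("key missing")
		} else {
			fmt.Printf("key present with a %d-byte value\n", len(v))
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```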
+## Known Issues + +- bbolt might run into a data corruption issue on Linux when the feature + [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in + Linux kernel version v5.10, is enabled. The fixes for the issue were included in + Linux kernel version v5.17; please refer to the links below: + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. + +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). ## Other Projects Using Bolt @@ -934,13 +980,16 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web-based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that can run on Windows, Linux, and Android systems. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. @@ -964,6 +1013,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
+* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go similarity index 99% rename from vendor/go.etcd.io/bbolt/bolt_unix_aix.go rename to vendor/go.etcd.io/bbolt/bolt_aix.go index 6dea4294dc..4b424ed4c4 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ b/vendor/go.etcd.io/bbolt/bolt_aix.go @@ -1,5 +1,4 @@ //go:build aix -// +build aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go new file mode 100644 index 0000000000..11890f0d70 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_android.go @@ -0,0 +1,90 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go index 447bc19733..2c67ab10cd 100644 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -1,5 +1,4 @@ //go:build arm64 -// +build arm64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go index 31c17c1d07..1ef2145c67 100644 --- a/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ b/vendor/go.etcd.io/bbolt/bolt_loong64.go @@ -1,5 +1,4 @@ //go:build loong64 -// +build loong64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go index a9385beb68..f28a0512a1 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -1,5 +1,4 @@ //go:build mips64 || mips64le -// +build mips64 mips64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go index ed734ff7f3..708fccdc01 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -1,5 +1,4 @@ //go:build mips || mipsle -// +build mips mipsle package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go index e403f57d8a..6a21cf33c7 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -1,5 +1,4 @@ //go:build ppc -// +build ppc package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go index fcd86529f9..a32f246228 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -1,5 +1,4 @@ //go:build ppc64 -// +build ppc64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go index 20234aca46..8fb60dddcb 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -1,5 +1,4 @@ //go:build ppc64le -// +build ppc64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go index 060f30c73c..a63d26ab21 100644 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -1,5 +1,4 @@ //go:build riscv64 -// +build riscv64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go index 92d2755adb..749ea97e3a 100644 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -1,5 +1,4 @@ //go:build s390x -// +build s390x package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go similarity index 100% rename from vendor/go.etcd.io/bbolt/bolt_unix_solaris.go rename to vendor/go.etcd.io/bbolt/bolt_solaris.go diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 757ae4d1a4..d1922c2d99 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android package bbolt @@ -10,6 +9,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/errors" ) // flock acquires an advisory lock on a file descriptor. 
@@ -36,7 +37,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index e5dde27454..ec21ecb85c 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -8,6 +8,8 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "go.etcd.io/bbolt/errors" ) // fdatasync flushes written data to a file descriptor. @@ -42,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -70,7 +72,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. @@ -93,7 +95,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go index 81e09a5310..27face752e 100644 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index f3533d3446..6371ace972 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +17,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +43,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -153,13 +145,23 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { - return nil, ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Insert into node. @@ -173,21 +175,21 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(newKey, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, errors.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. 
var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() - c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -200,39 +202,108 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } + + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, which would make it ineligible for stack allocation. + newKey := cloneBytes(key) + + if b.buckets != nil { + if child := b.buckets[string(newKey)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(newKey)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + child := b.Bucket(newKey) + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -243,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -251,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. the key already exists in the destination bucket; +// 3. the key represents a non-bucket value; +// 4. or the source and destination buckets are the same. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !b.Writable() || !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) + return errors.ErrIncompatibleValue + } + + // Return ErrSameBuckets if the source bucket and the + // destination bucket are actually the same bucket.
+ if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add the sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + +// Inspect returns the structure of the bucket. +func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -278,17 +449,27 @@ func (b *Bucket) Get(key []byte) []byte { // If the key exists then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return errors.ErrValueTooLarge } // Insert into node. @@ -301,8 +482,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value.
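`MoveBucket` and `Inspect`, both added above, are new API surface in this vendor bump, and the diff alone doesn't show a call site. Here is a small, hypothetical usage sketch (it assumes an open `*bolt.DB` named `db`, the package imported as `bolt`, and `fmt` imported; all bucket names are illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	src, err := tx.CreateBucketIfNotExists([]byte("src"))
	if err != nil {
		return err
	}
	if _, err := src.CreateBucketIfNotExists([]byte("child")); err != nil {
		return err
	}
	dst, err := tx.CreateBucketIfNotExists([]byte("dst"))
	if err != nil {
		return err
	}
	// Move the "child" sub-bucket from src to dst. Both buckets belong to
	// the same writable transaction, so none of the documented error cases
	// (missing source, existing destination key, non-bucket value, same
	// bucket) apply here.
	if err := src.MoveBucket([]byte("child"), dst); err != nil {
		return err
	}
	// Inspect reports the bucket tree as a nested BucketStructure value.
	fmt.Printf("%+v\n", dst.Inspect())
	return nil
})
```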
- if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} @@ -315,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -332,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is an existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -343,44 +535,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -390,7 +584,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -403,11 +597,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -421,64 +615,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if p.IsLeafPage() { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. 
- used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -499,29 +693,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -529,16 +723,16 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children. if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -561,9 +755,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. @@ -577,10 +771,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -595,16 +789,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. 
- if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -615,11 +809,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&bucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -638,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -662,8 +856,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -682,6 +876,12 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. @@ -696,19 +896,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -723,11 +923,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. 
+func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } @@ -797,3 +997,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index bbfd92a9bc..0c1e28c106 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +33,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +43,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +54,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +64,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +83,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +105,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +118,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +159,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +175,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +196,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. 
- var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -277,10 +280,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -303,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -315,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -338,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. @@ -349,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -375,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. 
- elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -395,19 +398,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. type elemRef struct { - page *page + page *common.Page node *node index int } @@ -417,7 +420,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. @@ -425,5 +428,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 822798e41a..5c1947e998 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -3,49 +3,28 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" - "sort" "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond // FreelistType is the type of the freelist backend type FreelistType string +// TODO(ahrtr): eventually we should (step by step) +// 1. default to `FreelistMapType`; +// 2. 
remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( // FreelistArrayType indicates backend freelist type is array FreelistArrayType = FreelistType("array") @@ -137,6 +116,8 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -146,15 +127,14 @@ type DB struct { dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -191,13 +171,15 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. if options == nil { options = DefaultOptions @@ -211,9 +193,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize + + if options.Logger == nil { + db.logger = getDiscardLogger() + } else { + db.logger = options.Logger + } + + lg := db.Logger() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -222,6 +222,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -230,9 +231,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -244,8 +245,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). 
- if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -254,27 +256,28 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. - if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -286,8 +289,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -302,13 +306,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. 
if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } @@ -352,7 +357,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -361,11 +366,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -397,13 +402,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -414,17 +419,29 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist +} + +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -433,21 +450,22 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } + lg := db.Logger() // Ensure the size is at least the minimum size. 
- fileSize := int(info.Size()) + var fileSize int + fileSize, err = db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } var size = fileSize if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -472,6 +490,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -493,15 +512,16 @@ func (db *DB) mmap(minsz int) (err error) { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -524,6 +544,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: %v", err.Error()) } @@ -543,13 +564,13 @@ func (db *DB) mmapSize(size int) (int, error) { // Verify the requested size is not above the maximum allowed. if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -571,6 +592,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: %v", err.Error()) } return nil @@ -580,6 +602,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: %v", err.Error()) } return nil @@ -600,42 +623,43 @@ func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. 
- m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } - db.filesz = len(buf) return nil } @@ -716,13 +740,31 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the @@ -738,14 +780,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. @@ -755,6 +797,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.AddReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -771,7 +816,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -786,49 +831,23 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. 
if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.ReleasePendingPages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -848,6 +867,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -1056,7 +1078,20 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1069,37 +1104,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. 
-func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1109,7 +1144,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1117,17 +1152,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.Allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1135,7 +1171,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1143,7 +1180,13 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. 
- if sz <= db.filesz { + lg := db.Logger() + fileSize, err := db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } + if sz <= fileSize { return nil } @@ -1162,21 +1205,22 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } @@ -1184,7 +1228,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1196,21 +1240,21 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1218,11 +1262,17 @@ func (db *DB) freepages() []pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. @@ -1277,6 +1327,19 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger +} + +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + } // DefaultOptions represent the options used if nil options are passed into Open(). 
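
[Bump note: to illustrate how the new Options fields surface to embedders of this vendored library, a minimal sketch follows; the db path is illustrative, and Logger is left nil, which falls back to the internal discard logger per db.Logger() above.]

    package main

    import (
    	"errors"
    	"log"
    	"time"

    	bolt "go.etcd.io/bbolt"
    	berrors "go.etcd.io/bbolt/errors"
    )

    func main() {
    	// Timeout bounds the file-lock retry loop in Open; zero waits indefinitely.
    	// Leaving Logger nil makes bbolt use its internal discard logger.
    	opts := &bolt.Options{Timeout: time.Second}

    	db, err := bolt.Open("example.db", 0600, opts) // path is illustrative
    	if errors.Is(err, berrors.ErrTimeout) {
    		log.Fatal("timed out waiting for the file lock (held by another process?)")
    	}
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()
    }
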
@@ -1327,65 +1390,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index f2c3b20ed8..02958c86f5 100644 --- a/vendor/go.etcd.io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -1,78 +1,108 @@ package bbolt -import "errors" +import "go.etcd.io/bbolt/errors" // These errors can be returned when opening or calling methods on a DB. var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalid = errors.ErrInvalid // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. 
- ErrChecksum = errors.New("checksum error") + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly // ErrFreePagesNotLoaded is returned when a readonly transaction without // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNotFound = errors.ErrBucketNotFound // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrValueTooLarge = errors.ErrValueTooLarge // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrIncompatibleValue = errors.ErrIncompatibleValue ) diff --git a/vendor/go.etcd.io/bbolt/errors/errors.go b/vendor/go.etcd.io/bbolt/errors/errors.go new file mode 100644 index 0000000000..c115289e56 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors/errors.go @@ -0,0 +1,84 @@ +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. + ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. + ErrDifferentDB = errors.New("the source and target buckets are in different database files") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index dffc7bc749..0000000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,410 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. 
-func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) 
- delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - - // Remove pgids which are allocated by this txid - for pgid, tid := range f.allocs { - if tid == txid { - delete(f.allocs, pgid) - } - } - - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) - ids := unsafe.Slice((*pgid)(data), count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. 
- p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l) - f.copyall(ids) - } else { - p.count = 0xFFFF - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e73..0000000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - for i 
:= pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go new file mode 100644 index 0000000000..2b4ab1453a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. 
+type InBucket struct {
+	root     Pgid   // page id of the bucket's root-level page
+	sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+func NewInBucket(root Pgid, seq uint64) InBucket {
+	return InBucket{
+		root:     root,
+		sequence: seq,
+	}
+}
+
+func (b *InBucket) RootPage() Pgid {
+	return b.root
+}
+
+func (b *InBucket) SetRootPage(id Pgid) {
+	b.root = id
+}
+
+// InSequence returns the sequence. The reason why not naming it `Sequence`
+// is to avoid duplicated name as `(*Bucket) Sequence()`
+func (b *InBucket) InSequence() uint64 {
+	return b.sequence
+}
+
+func (b *InBucket) SetInSequence(v uint64) {
+	b.sequence = v
+}
+
+func (b *InBucket) IncSequence() {
+	b.sequence++
+}
+
+func (b *InBucket) InlinePage(v []byte) *Page {
+	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
+}
+
+func (b *InBucket) String() string {
+	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go
new file mode 100644
index 0000000000..080b9af789
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go
@@ -0,0 +1,115 @@
+package common
+
+import "unsafe"
+
+// Inode represents an internal node inside of a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type Inode struct {
+	flags uint32
+	pgid  Pgid
+	key   []byte
+	value []byte
+}
+
+type Inodes []Inode
+
+func (in *Inode) Flags() uint32 {
+	return in.flags
+}
+
+func (in *Inode) SetFlags(flags uint32) {
+	in.flags = flags
+}
+
+func (in *Inode) Pgid() Pgid {
+	return in.pgid
+}
+
+func (in *Inode) SetPgid(id Pgid) {
+	in.pgid = id
+}
+
+func (in *Inode) Key() []byte {
+	return in.key
+}
+
+func (in *Inode) SetKey(key []byte) {
+	in.key = key
+}
+
+func (in *Inode) Value() []byte {
+	return in.value
+}
+
+func (in *Inode) SetValue(value []byte) {
+	in.value = value
+}
+
+func ReadInodeFromPage(p *Page) Inodes {
+	inodes := make(Inodes, int(p.Count()))
+	isLeaf := p.IsLeafPage()
+	for i := 0; i < int(p.Count()); i++ {
+		inode := &inodes[i]
+		if isLeaf {
+			elem := p.LeafPageElement(uint16(i))
+			inode.SetFlags(elem.Flags())
+			inode.SetKey(elem.Key())
+			inode.SetValue(elem.Value())
+		} else {
+			elem := p.BranchPageElement(uint16(i))
+			inode.SetPgid(elem.Pgid())
+			inode.SetKey(elem.Key())
+		}
+		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
+	}
+
+	return inodes
+}
+
+func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
+	// Loop over each item and write it to the page.
+	// off tracks the offset into p of the start of the next data.
+	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+	isLeaf := p.IsLeafPage()
+	for i, item := range inodes {
+		Assert(len(item.Key()) > 0, "write: zero-length inode key")
+
+		// Create a slice to write into of needed size and advance
+		// byte pointer for next iteration.
+		sz := len(item.Key()) + len(item.Value())
+		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+		off += uintptr(sz)
+
+		// Write the page element.
+ if isLeaf { + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) + } else { + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) + Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go new file mode 100644 index 0000000000..055388604a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go @@ -0,0 +1,161 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" + + "go.etcd.io/bbolt/errors" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return errors.ErrInvalid + } else if m.version != Version { + return errors.ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return errors.ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.SetFlags(MetaPageFlag) + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
+func (m *Meta) Sum64() uint64 {
+	var h = fnv.New64a()
+	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+	return h.Sum64()
+}
+
+func (m *Meta) Magic() uint32 {
+	return m.magic
+}
+
+func (m *Meta) SetMagic(v uint32) {
+	m.magic = v
+}
+
+func (m *Meta) Version() uint32 {
+	return m.version
+}
+
+func (m *Meta) SetVersion(v uint32) {
+	m.version = v
+}
+
+func (m *Meta) PageSize() uint32 {
+	return m.pageSize
+}
+
+func (m *Meta) SetPageSize(v uint32) {
+	m.pageSize = v
+}
+
+func (m *Meta) Flags() uint32 {
+	return m.flags
+}
+
+func (m *Meta) SetFlags(v uint32) {
+	m.flags = v
+}
+
+func (m *Meta) SetRootBucket(b InBucket) {
+	m.root = b
+}
+
+func (m *Meta) RootBucket() *InBucket {
+	return &m.root
+}
+
+func (m *Meta) Freelist() Pgid {
+	return m.freelist
+}
+
+func (m *Meta) SetFreelist(v Pgid) {
+	m.freelist = v
+}
+
+func (m *Meta) IsFreelistPersisted() bool {
+	return m.freelist != PgidNoFreelist
+}
+
+func (m *Meta) Pgid() Pgid {
+	return m.pgid
+}
+
+func (m *Meta) SetPgid(id Pgid) {
+	m.pgid = id
+}
+
+func (m *Meta) Txid() Txid {
+	return m.txid
+}
+
+func (m *Meta) SetTxid(id Txid) {
+	m.txid = id
+}
+
+func (m *Meta) IncTxid() {
+	m.txid += 1
+}
+
+func (m *Meta) DecTxid() {
+	m.txid -= 1
+}
+
+func (m *Meta) Checksum() uint64 {
+	return m.checksum
+}
+
+func (m *Meta) SetChecksum(v uint64) {
+	m.checksum = v
+}
+
+func (m *Meta) Print(w io.Writer) {
+	fmt.Fprintf(w, "Version: %d\n", m.version)
+	fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+	fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+	fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+	fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+	fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+	fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+	fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+	fmt.Fprintf(w, "\n")
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go
new file mode 100644
index 0000000000..ee808967c5
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/page.go
@@ -0,0 +1,391 @@
+package common
+
+import (
+	"fmt"
+	"os"
+	"sort"
+	"unsafe"
+)
+
+const PageHeaderSize = unsafe.Sizeof(Page{})
+
+const MinKeysPerPage = 2
+
+const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
+const pgidSize = unsafe.Sizeof(Pgid(0))
+
+const (
+	BranchPageFlag   = 0x01
+	LeafPageFlag     = 0x02
+	MetaPageFlag     = 0x04
+	FreelistPageFlag = 0x10
+)
+
+const (
+	BucketLeafFlag = 0x01
+)
+
+type Pgid uint64
+
+type Page struct {
+	id       Pgid
+	flags    uint16
+	count    uint16
+	overflow uint32
+}
+
+func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
+	return &Page{
+		id:       id,
+		flags:    flags,
+		count:    count,
+		overflow: overflow,
+	}
+}
+
+// Typ returns a human-readable page type string used for debugging.
+func (p *Page) Typ() string {
+	if p.IsBranchPage() {
+		return "branch"
+	} else if p.IsLeafPage() {
+		return "leaf"
+	} else if p.IsMetaPage() {
+		return "meta"
+	} else if p.IsFreelistPage() {
+		return "freelist"
+	}
+	return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) IsBranchPage() bool {
+	return p.flags == BranchPageFlag
+}
+
+func (p *Page) IsLeafPage() bool {
+	return p.flags == LeafPageFlag
+}
+
+func (p *Page) IsMetaPage() bool {
+	return p.flags == MetaPageFlag
+}
+
+func (p *Page) IsFreelistPage() bool {
+	return p.flags == FreelistPageFlag
+}
+
+// Meta returns a pointer to the metadata section of the page.
+func (p *Page) Meta() *Meta { + return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *Page) FastCheck(id Pgid) { + Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + Assert(p.IsBranchPage() || + p.IsLeafPage() || + p.IsMetaPage() || + p.IsFreelistPage(), + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// LeafPageElement retrieves the leaf node by index +func (p *Page) LeafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + LeafPageElementSize, int(index))) +} + +// LeafPageElements retrieves a list of leaf nodes. +func (p *Page) LeafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) + return elems +} + +// BranchPageElement retrieves the branch node by index +func (p *Page) BranchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// BranchPageElements retrieves a list of branch nodes. +func (p *Page) BranchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) + return elems +} + +func (p *Page) FreelistPageCount() (int, int) { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + return idx, count +} + +func (p *Page) FreelistPageIds() []Pgid { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + + idx, count := p.FreelistPageCount() + + if count == 0 { + return nil + } + + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx) + ids := unsafe.Slice((*Pgid)(data), count) + + return ids +} + +// dump writes n bytes of the page to STDERR as hex output. 
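The saturated-count convention decoded by FreelistPageCount above is easy to get wrong when writing tooling against raw pages, so here is a minimal sketch of the same rule (the freelistHeader helper is hypothetical, not part of bbolt or this patch):

package main

import "fmt"

// freelistHeader mirrors Page.FreelistPageCount: a saturated uint16 count
// (0xFFFF) means the real element count is stored in the first Pgid-sized
// slot, and the page ids themselves begin at index 1.
func freelistHeader(count16 uint16, firstSlot uint64) (firstIdx, count int) {
	if count16 == 0xFFFF {
		return 1, int(firstSlot)
	}
	return 0, int(count16)
}

func main() {
	fmt.Println(freelistHeader(12, 3))         // 0 12: ids start at slot 0
	fmt.Println(freelistHeader(0xFFFF, 70000)) // 1 70000: slot 0 holds the count
}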
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. 
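Note that the pos stored in each element below is relative to the element's own address rather than the page start; Key and Value rely on that when they slice from unsafe.Pointer(n). A worked example with made-up sizes (none of these numbers come from the patch):

package main

import "fmt"

func main() {
	// Hypothetical layout: a 16-byte page header, three 16-byte leaf
	// elements, then the packed key/value data. Assume 8-byte keys and
	// no values, purely for the arithmetic.
	const pageHeader, elemSize, keySize, count = 16, 16, 8, 3
	dataOffset := pageHeader + elemSize*count // first key lands at byte 64

	for i := 0; i < count; i++ {
		elemOffset := pageHeader + elemSize*i
		pos := dataOffset - elemOffset // relative to the element, not the page
		fmt.Printf("element %d: header at %d, pos=%d, key at %d\n",
			i, elemOffset, pos, elemOffset+pos)
		dataOffset += keySize // next element's data starts after this key
	}
}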
+type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go new file mode 100644 index 0000000000..8ad8279a09 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -0,0 +1,40 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. +const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version uint32 = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// DO NOT EDIT. Copied from the "bolt" package. +const pageMaxAllocSize = 0xFFFFFFF + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// Txid represents the internal transaction identifier. 
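Merge and Mergepgids above compute a sorted union, galloping with sort.Search so long runs are copied in one step. A naive two-pointer equivalent, shown only to pin down the semantics (plain uint64 stands in for Pgid):

package main

import "fmt"

func merge(a, b []uint64) []uint64 {
	out := make([]uint64, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		if a[0] <= b[0] {
			out = append(out, a[0])
			a = a[1:]
		} else {
			out = append(out, b[0])
			b = b[1:]
		}
	}
	// One side is exhausted; the other is already sorted.
	return append(append(out, a...), b...)
}

func main() {
	fmt.Println(merge([]uint64{3, 5, 9}, []uint64{4, 6, 7})) // [3 4 5 6 7 9]
}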
+type Txid uint64 diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go similarity index 74% rename from vendor/go.etcd.io/bbolt/unsafe.go rename to vendor/go.etcd.io/bbolt/internal/common/unsafe.go index 7745d32ce1..9b77dd7b2b 100644 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go @@ -1,18 +1,18 @@ -package bbolt +package common import ( "unsafe" ) -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { +func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset) } -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { +func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) } -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { +func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices // // This memory is not allocated from C, but it is unmanaged by Go's @@ -23,5 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] + return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go new file mode 100644 index 0000000000..bdf82a7b00 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go @@ -0,0 +1,64 @@ +package common + +import ( + "fmt" + "io" + "os" + "unsafe" +) + +func LoadBucket(buf []byte) *InBucket { + return (*InBucket)(unsafe.Pointer(&buf[0])) +} + +func LoadPage(buf []byte) *Page { + return (*Page)(unsafe.Pointer(&buf[0])) +} + +func LoadPageMeta(buf []byte) *Meta { + return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) +} + +func CopyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. 
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go new file mode 100644 index 0000000000..eac95e2630 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go @@ -0,0 +1,67 @@ +// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go +package common + +import ( + "fmt" + "os" + "strings" +) + +const ENV_VERIFY = "BBOLT_VERIFY" + +type VerificationType string + +const ( + ENV_VERIFY_VALUE_ALL VerificationType = "all" + ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(ENV_VERIFY)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(ENV_VERIFY, string(verification)) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to bring the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(ENV_VERIFY_VALUE_ALL) +} + +// DisableVerifications unsets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(ENV_VERIFY) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go new file mode 100644 index 0000000000..0cc1ba7150 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go @@ -0,0 +1,108 @@ +package freelist + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. 
+} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() +} + +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +func (f *array) FreeCount() int { + return len(f.ids) +} + +func (f *array) freePageIds() common.Pgids { + return f.ids +} + +func (f *array) mergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + ids: []common.Pgid{}, + } + a.Interface = a + return a +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go new file mode 100644 index 0000000000..2b819506bd --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. + // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. + Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. 
+ FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free or is one of the meta pages, then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. + releaseRange(begin, end common.Txid) + + // mergeSpans is merging the given pages into the freelist + mergeSpans(ids common.Pgids) +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go new file mode 100644 index 0000000000..8d471f4b5b --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go @@ -0,0 +1,292 @@ +package freelist + +import ( + "fmt" + "reflect" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[common.Pgid]struct{} + +type hashMap struct { + *shared + + freePagesCount uint64 // count of free pages(hashmap version) + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size +} + +func (f *hashMap) Init(pgids common.Pgids) { + // reset the counter when freelist init + f.freePagesCount = 0 + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + + if len(pgids) == 0 { + return + } + + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + size := uint64(1) + start := pgids[0] + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } + + f.reindex() +} + +func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid { + if n == 0 { + return 0 + } + + 
// if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+common.Pgid(n), remain) + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +func (f *hashMap) FreeCount() int { + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) + return int(f.freePagesCount) +} + +func (f *hashMap) freePageIds() common.Pgids { + count := f.FreeCount() + if count == 0 { + return common.Pgids{} + } + + m := make([]common.Pgid, 0, count) + + startPageIds := make([]common.Pgid, 0, len(f.forwardMap)) + for k := range f.forwardMap { + startPageIds = append(startPageIds, k) + } + sort.Sort(common.Pgids(startPageIds)) + + for _, start := range startPageIds { + if size, ok := f.forwardMap[start]; ok { + for i := 0; i < int(size); i++ { + m = append(m, start+common.Pgid(i)) + } + } + } + + return m +} + +func (f *hashMap) hashmapFreeCountSlow() int { + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +func (f *hashMap) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[common.Pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} + f.freePagesCount += size +} + +func (f *hashMap) delSpan(start common.Pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+common.Pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } + f.freePagesCount -= size +} + +func (f *hashMap) mergeSpans(ids common.Pgids) { + common.Verify(func() { + ids1Freemap := f.idsFromFreemaps() + ids2Forward := f.idsFromForwardMap() + ids3Backward := f.idsFromBackwardMap() + + if !reflect.DeepEqual(ids1Freemap, ids2Forward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap)) + } + if !reflect.DeepEqual(ids1Freemap, ids3Backward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap)) + } + + sort.Sort(ids) + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.freemaps. 
+ if _, ok := ids1Freemap[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps)) + } + } + }) + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - common.Pgid(preSize) + f.delSpan(start, preSize) + + newStart -= common.Pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +// idsFromFreemaps get all free page IDs from f.freemaps. +// used by test only. +func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for size, idSet := range f.freemaps { + for start := range idSet { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps)) + } + ids[id] = struct{}{} + } + } + } + return ids +} + +// idsFromForwardMap get all free page IDs from f.forwardMap. +// used by test only. +func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +// idsFromBackwardMap get all free page IDs from f.backwardMap. +// used by test only. +func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go new file mode 100644 index 0000000000..f2d1130083 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go @@ -0,0 +1,310 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. 
+} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) + if ok { + delete(t.allocs, p.Id()) + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // A writing TXN should never free a page which was allocated by itself. + panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. 
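+	// No surviving read-only transaction has a txid inside those extents,
+	// so the pages released here can no longer be reached by any reader.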
+} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + m := common.Pgids{} + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + t.NoSyncReload(t.freePageIds()) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + a := []common.Pgid{} + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init([]common.Pgid{}) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. 
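+	// Page.FreelistPageCount applies the same first-element convention
+	// when the freelist page is read back.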
+ l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/vendor/go.etcd.io/bbolt/logger.go b/vendor/go.etcd.io/bbolt/logger.go new file mode 100644 index 0000000000..fb250894a2 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/logger.go @@ -0,0 +1,113 @@ +package bbolt + +// See https://github.com/etcd-io/raft/blob/main/logger.go +import ( + "fmt" + "io" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. +type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go index 744a972f51..9a0fd332c9 100644 --- a/vendor/go.etcd.io/bbolt/mlock_unix.go +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 9c56150d88..022b1001e2 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" "sort" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,10 +15,10 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. @@ -38,10 +39,10 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -50,10 +51,10 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -113,9 +114,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. 
-func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { } // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -158,30 +159,15 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = p.IsLeafPage() + n.inodes = common.ReadInodeFromPage(p) - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + n.key = n.inodes[0].Key() + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,57 +176,27 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. 
-func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. - if p.count == 0 { + if p.Count() == 0 { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } + common.WriteInodeToPage(n.inodes, p) // DEBUG ONLY: n.dump() } @@ -273,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node { func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } @@ -313,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize + sz = common.PageHeaderSize // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -360,7 +316,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. 
if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -371,10 +327,10 @@ func (n *node) spill() error { } // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + if p.Id() >= tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid())) } - node.pgid = p.id + node.pgid = p.Id() node.write(p) node.spilled = true @@ -382,12 +338,12 @@ func (n *node) spill() error { if node.parent != nil { var key = node.key if key == nil { - key = node.inodes[0].key + key = node.inodes[0].Key() } - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") + node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0) + node.key = node.inodes[0].Key() + common.Assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. @@ -415,8 +371,8 @@ func (n *node) rebalance() { // Update statistics. n.bucket.tx.stats.IncRebalance(1) - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 + // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys. + var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } @@ -426,14 +382,14 @@ func (n *node) rebalance() { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -457,53 +413,37 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) + n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. n.parent.rebalance() } @@ -525,20 +465,20 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -553,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -594,17 +534,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index bb081b031e..0000000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,212 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. - _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. 
-func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 766395de3b..7b5db77278 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -5,15 +5,16 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -27,9 +28,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -48,24 +49,27 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. 
tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + if tx == nil || tx.meta == nil { + return -1 + } + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -75,7 +79,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -96,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. @@ -123,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. @@ -137,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. +// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } + + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -157,40 +197,43 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. 
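// A caller-side sketch of the new Tx.MoveBucket API above, assuming an open
// *bbolt.DB named db; the bucket names are hypothetical.
func archiveBucket(db *bbolt.DB) error {
	return db.Update(func(tx *bbolt.Tx) error {
		src := tx.Bucket([]byte("accounts"))
		// dst == nil converts "archived" from a child of src into a
		// top-level bucket; src == nil would move a top-level bucket into dst.
		return tx.MoveBucket([]byte("archived"), src, nil)
	})
}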
startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { + if tx.meta.Pgid() > opgid { _ = errors.New("") // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -198,7 +241,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -208,11 +252,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -220,7 +264,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -240,16 +285,14 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id + + tx.db.freelist.Write(p) + tx.meta.SetFreelist(p.Id()) return nil } @@ -257,9 +300,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. 
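// A minimal caller-side sketch of the commit path above (db is an assumed
// open *bbolt.DB). Commit now logs its failure modes through the DB's logger,
// but the error contract for callers is unchanged.
func writeWidgets(db *bbolt.DB) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback() // safe after a successful Commit; it just returns ErrTxClosed
	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
		return err
	}
	return tx.Commit()
}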
func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -271,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -282,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -305,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. tx.db.rwtx = nil @@ -335,7 +378,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -357,13 +400,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.MetaPageFlag) + *page.Meta() = *tx.meta // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() + page.SetId(0) + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err := w.Write(buf) n += int64(nn) if err != nil { @@ -371,9 +414,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() + page.SetId(1) + page.Meta().DecTxid() + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err = w.Write(buf) n += int64(nn) if err != nil { @@ -413,14 +456,16 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. 
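// A sketch of an online backup via Tx.WriteTo above (Copy is deprecated in
// its favor). db is an assumed open *bbolt.DB; the destination file is
// created by the caller. WriteTo emits both meta pages, giving the second the
// lower txid.
func backup(db *bbolt.DB, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	return db.View(func(tx *bbolt.Tx) error {
		_, err := tx.WriteTo(f)
		return err
	})
}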
-func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -432,18 +477,19 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + lg := tx.db.Logger() + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. @@ -452,9 +498,10 @@ func (tx *Tx) write() error { if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -474,9 +521,10 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -485,11 +533,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -503,18 +551,24 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. 
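// A generic sketch of the chunked-write loop in tx.write above: a region
// larger than the allocator's limit is flushed in bounded chunks. maxChunk
// stands in for bbolt's maxAllocSize-1; w is any io.WriterAt.
func writeChunked(w io.WriterAt, buf []byte, offset int64, maxChunk int) error {
	for written := 0; written < len(buf); {
		sz := len(buf) - written
		if sz > maxChunk {
			sz = maxChunk
		}
		if _, err := w.WriteAt(buf[written:written+sz], offset+int64(written)); err != nil {
			return err
		}
		written += sz
	}
	return nil
}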
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -527,69 +581,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, berrors.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go index 75c7c08436..c3ecbb9750 100644 --- a/vendor/go.etcd.io/bbolt/tx_check.go +++ b/vendor/go.etcd.io/bbolt/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. 
@@ -13,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } @@ -28,18 +27,22 @@ func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) @@ -48,118 +51,171 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. - reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) } +} - // Close the channel to signal completion. 
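// With CheckWithOptions folded into Check above, a caller-side sketch (db is
// an assumed open *bbolt.DB). The channel must be drained so the checker
// goroutine can finish and close it; errors.Join is from the standard library.
func checkDB(db *bbolt.DB) error {
	return db.View(func(tx *bbolt.Tx) error {
		var errs []error
		for cerr := range tx.Check(bbolt.WithKVStringer(bbolt.HexKVStringer())) {
			errs = append(errs, cerr)
		}
		return errors.Join(errs...)
	})
}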
- close(ch) +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } + p := tx.page(pageId) - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.IsBucketEntry() { + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + tx: tx, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) + } } - reachable[id] = p } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) + } +} - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.RootPage() == 0 { + return + } - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. 
_ = b.ForEachBucket(func(k []byte) error {
 if child := b.Bucket(k); child != nil {
- tx.checkBucket(child, reachable, freed, kvStringer, ch)
+ tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
 }
 return nil
 })
 }
-// recursivelyCheckPages confirms database consistency with respect to b-tree
+func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
+ kvStringer KVStringer, ch chan error) {
+ tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) {
+ verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch)
+ })
+
+ tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch)
+}
+
+func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) {
+ if p.Id() > hwm {
+ ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack)
+ }
+
+ // Ensure each page is only referenced once.
+ for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ {
+ var id = p.Id() + i
+ if _, ok := reachable[id]; ok {
+ ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
+ }
+ reachable[id] = p
+ }
+
+ // We should only encounter un-freed leaf and branch pages.
+ if freed[p.Id()] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.Id()))
+ } else if !p.IsBranchPage() && !p.IsLeafPage() {
+ ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack)
+ }
+}
+
+// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree
// key order constraints:
// - keys on pages must be sorted
// - keys on children pages are between 2 consecutive keys on the parent's branch page.
-func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) {
- tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch)
+func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) {
+ tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch)
 }
-// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are:
+// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are:
 // - >=`minKeyClosed` (can be nil)
 // - <`maxKeyOpen` (can be nil)
 // - are in the right ordering relationship to their parents.
 // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for clean debugging messages.
-func (tx *Tx) recursivelyCheckPagesInternal(
- pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid,
+func (tx *Tx) recursivelyCheckPageKeyOrderInternal(
+ pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid,
 keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
 p := tx.page(pgId)
 pagesStack = append(pagesStack, pgId)
 switch {
- case p.flags&branchPageFlag != 0:
+ case p.IsBranchPage():
 // For branch page we navigate ranges of all subpages.
runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() + if i < len(p.BranchPageElements())-1 { + maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.flags&leafPageFlag != 0: + case p.IsLeafPage(): runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.Key() } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() + if p.Count() > 0 { + return p.LeafPageElement(p.Count() - 1).Key() } default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId) } return maxKeyInSubtree } @@ -168,7 +224,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). */ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { +func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) { if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) @@ -194,6 +250,7 @@ func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousK type checkConfig struct { kvStringer KVStringer + pageId uint64 } type CheckOption func(options *checkConfig) @@ -204,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } } +// WithPageId sets a page ID from which the check command starts to check +func WithPageId(pageId uint64) CheckOption { + return func(c *checkConfig) { + c.pageId = pageId + } +} + // KVStringer allows to prepare human-readable diagnostic messages. 
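// A sketch of the new WithPageId option above, which scopes the consistency
// check to the subtree rooted at one page. tx is an assumed open read
// transaction; the ID 7 is an arbitrary example, and IDs below 2 are meta
// pages rejected by the range check in tx.check.
for cerr := range tx.Check(bbolt.WithPageId(7)) {
	fmt.Println("consistency error:", cerr)
}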
type KVStringer interface { KeyToString([]byte) string diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b7..a83a026274 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b7..e555a475f1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -123,6 +117,11 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } + if startTime := StartTimeFromContext(ctx); !startTime.IsZero() { + opts = append(opts, trace.WithTimestamp(startTime)) + requestStartTime = startTime + } + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) defer span.End() @@ -190,14 +189,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
- h.semconv.RecordMetrics(ctx, semconv.MetricData{
- ServerName: h.server,
- Req: r,
- StatusCode: statusCode,
- AdditionalAttributes: labeler.Get(),
- RequestSize: bw.BytesRead(),
- ResponseSize: bytesWritten,
- ElapsedTime: elapsedTime,
+ h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{
+ ServerName: h.server,
+ ResponseSize: bytesWritten,
+ MetricAttributes: semconv.MetricAttributes{
+ Req: r,
+ StatusCode: statusCode,
+ AdditionalAttributes: labeler.Get(),
+ },
+ MetricData: semconv.MetricData{
+ RequestSize: bw.BytesRead(),
+ ElapsedTime: elapsedTime,
+ },
 })
 }
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
index aea171fb26..fbc344cbdd 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
@@ -44,7 +44,9 @@ func (w *RespWriterWrapper) Write(p []byte) (int, error) {
 w.mu.Lock()
 defer w.mu.Unlock()
- w.writeHeader(http.StatusOK)
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
 n, err := w.ResponseWriter.Write(p)
 n1 := int64(n)
@@ -80,7 +82,12 @@ func (w *RespWriterWrapper) writeHeader(statusCode int) {
 // Flush implements [http.Flusher].
 func (w *RespWriterWrapper) Flush() {
- w.WriteHeader(http.StatusOK)
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if !w.wroteHeader {
+ w.writeHeader(http.StatusOK)
+ }
 if f, ok := w.ResponseWriter.(http.Flusher); ok {
 f.Flush()
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
index 9cae4cab86..fb893b2504 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go
@@ -83,18 +83,26 @@ func (s HTTPServer) Status(code int) (codes.Code, string) {
 return codes.Unset, ""
 }
-type MetricData struct {
- ServerName string
+type ServerMetricData struct {
+ ServerName string
+ ResponseSize int64
+
+ MetricData
+ MetricAttributes
+}
+
+type MetricAttributes struct {
 Req *http.Request
 StatusCode int
 AdditionalAttributes []attribute.KeyValue
+}
-
- RequestSize int64
- ResponseSize int64
- ElapsedTime float64
+type MetricData struct {
+ RequestSize int64
+ ElapsedTime float64
 }
-func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
+func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) {
 if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
 // This will happen if an HTTPServer{} is used instead of NewHTTPServer.
 return
@@ -102,7 +110,7 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
 attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
 o := metric.WithAttributeSet(attribute.NewSet(attributes...))
- addOpts := []metric.AddOption{o} // Allocate vararg slice once.
+ addOpts := []metric.AddOption{o}
 s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
 s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
@@ -122,11 +130,20 @@ func NewHTTPServer(meter metric.Meter) HTTPServer {
 type HTTPClient struct {
 duplicate bool
+
+ // old metrics
+ requestBytesCounter metric.Int64Counter
+ responseBytesCounter metric.Int64Counter
+ latencyMeasure metric.Float64Histogram
 }
-func NewHTTPClient() HTTPClient {
+func NewHTTPClient(meter metric.Meter) HTTPClient {
 env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
- return HTTPClient{duplicate: env == "http/dup"}
+ client := HTTPClient{
+ duplicate: env == "http/dup",
+ }
+ client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter)
+ return client
 }
 // RequestTraceAttrs returns attributes for an HTTP request made by a client.
@@ -163,3 +180,48 @@ func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
 return attribute.KeyValue{}
 }
+
+type MetricOpts struct {
+ measurement metric.MeasurementOption
+ addOptions metric.AddOption
+}
+
+func (o MetricOpts) MeasurementOption() metric.MeasurementOption {
+ return o.measurement
+}
+
+func (o MetricOpts) AddOptions() metric.AddOption {
+ return o.addOptions
+}
+
+func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts {
+ attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes)
+ // TODO: Duplicate Metrics
+ set := metric.WithAttributeSet(attribute.NewSet(attributes...))
+ return MetricOpts{
+ measurement: set,
+ addOptions: set,
+ }
+}
+
+func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) {
+ if s.requestBytesCounter == nil || s.latencyMeasure == nil {
+ // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
+ return
+ }
+
+ s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions())
+ s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption())
+
+ // TODO: Duplicate Metrics
+}
+
+func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) {
+ if s.responseBytesCounter == nil {
+ // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e67..5367732ec5 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,16 +164,6 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS @@ -190,3 +180,95 @@ func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
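// How the client pieces above fit together, sketched from the Transport
// changes later in this patch. req, resp, bytesRead, respBytes, and elapsedMs
// are assumed values; this is an internal package, shown for orientation only.
client := semconv.NewHTTPClient(meter)
opts := client.MetricOptions(semconv.MetricAttributes{
	Req:        req,
	StatusCode: resp.StatusCode,
})
client.RecordMetrics(ctx, semconv.MetricData{
	RequestSize: bytesRead,
	ElapsedTime: elapsedMs,
}, opts)
client.RecordResponseSize(ctx, respBytes, opts.AddOptions())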
+const (
+ clientRequestSize = "http.client.request.size" // Outgoing request bytes total
+ clientResponseSize = "http.client.response.size" // Outgoing response bytes total
+ clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds
+)
+
+func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
+ if meter == nil {
+ return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
+ }
+ requestBytesCounter, err := meter.Int64Counter(
+ clientRequestSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP request messages."),
+ )
+ handleErr(err)
+
+ responseBytesCounter, err := meter.Int64Counter(
+ clientResponseSize,
+ metric.WithUnit("By"),
+ metric.WithDescription("Measures the size of HTTP response messages."),
+ )
+ handleErr(err)
+
+ latencyMeasure, err := meter.Float64Histogram(
+ clientDuration,
+ metric.WithUnit("ms"),
+ metric.WithDescription("Measures the duration of outbound HTTP requests."),
+ )
+ handleErr(err)
+
+ return requestBytesCounter, responseBytesCounter, latencyMeasure
+}
+
+func standardizeHTTPMethodMetric(method string) attribute.KeyValue {
+ method = strings.ToUpper(method)
+ switch method {
+ case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+ default:
+ method = "_OTHER"
+ }
+ return semconv.HTTPMethod(method)
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
new file mode 100644
index 0000000000..9476ef01b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/start_time_context.go
@@ -0,0 +1,29 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+ "context"
+ "time"
+)
+
+type startTimeContextKeyType int
+
+const startTimeContextKey startTimeContextKeyType = 0
+
+// ContextWithStartTime returns a new context with the provided start time. The
+// start time will be used for metrics and traces emitted by the
+// instrumentation. Only one start time can be injected into the context.
+// Injecting it multiple times will override the previous calls.
+func ContextWithStartTime(parent context.Context, start time.Time) context.Context {
+ return context.WithValue(parent, startTimeContextKey, start)
+}
+
+// StartTimeFromContext retrieves a time.Time from the provided context if one
+// is available. If no start time was found in the provided context, the zero
+// time is returned.
+func StartTimeFromContext(ctx context.Context) time.Time { + t, _ := ctx.Value(startTimeContextKey).(time.Time) + return t +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d3438..39681ad4b0 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) 
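// A usage sketch for the start-time helpers above: a caller that accepted the
// request earlier (e.g. in its own queueing layer) can backdate the span and
// metrics. acceptedAt is an assumed time.Time captured by the caller, and
// handler is an assumed otelhttp-wrapped http.Handler.
ctx := otelhttp.ContextWithStartTime(r.Context(), acceptedAt)
handler.ServeHTTP(w, r.WithContext(ctx))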
- if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 502c1bdafc..16ef3cb9b9 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.54.0" + return "0.57.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 895c7664be..ae8577ef36 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -12,11 +12,3 @@ go.work go.work.sum gen/ - -/example/dice/dice -/example/namedtracer/namedtracer -/example/otel-collector/otel-collector -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index d09555506f..dbfb2a165a 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -127,8 +127,6 @@ linters-settings: - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - - "**/example/*.go" - - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" - "**/log/*.go" diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 4b361d0269..8f68dbd04a 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,52 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850) +- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK. 
(#5850) +- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861) +- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861) +- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861) +- The `go.opentelemetry.io/otel/semconv/v1.27.0` package. + The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894) +- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903) +- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933) +- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932) + +### Changed + +- Support scope attributes and make them as identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924) +- Support scope attributes and make them as identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926) +- Support scope attributes and make them as identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925) +- Make schema URL and scope attributes as identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931) +- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804) + +### Fixed + +- Global MeterProvider registration unwraps global instrument Observers, the undocumented Unwrap() methods are now private. (#5881) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892) +- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911) +- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915) +- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. 
(#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944) +- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944) +- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900) + +### Removed + +- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930) + ## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 ### Added @@ -3110,7 +3156,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.32.0...HEAD +[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index bb33965574..22a2e9dbd4 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -629,6 +629,10 @@ should be canceled. ## Approvers and Maintainers +### Triagers + +- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent + ### Approvers ### Maintainers @@ -641,13 +645,13 @@ should be canceled. ### Emeritus -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Evan Torrie](https://github.com/evantorrie), Yahoo -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb +- [Aaron Clawson](https://github.com/MadVikingGod) +- [Anthony Mirabella](https://github.com/Aneurysm9) +- [Chester Cheung](https://github.com/hanyuancheung) +- [Evan Torrie](https://github.com/evantorrie) +- [Gustavo Silva Paiva](https://github.com/paivagustavo) +- [Josh MacDonald](https://github.com/jmacd) +- [Liz Fong-Jones](https://github.com/lizthegrey) ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index a1228a2124..b8292a4fb9 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -260,7 +260,7 @@ SEMCONVPKG ?= "semconv/" semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc9..2e7690e43a 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 81157a71c5..bf27ef0220 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -97,8 +97,8 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), - StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), - EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), @@ -178,7 +178,7 @@ func buildSpanFlags(sc trace.SpanContext) uint32 { flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK } - return uint32(flags) + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative } // spanEvents transforms span Events to an OTLP span events. @@ -192,7 +192,7 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, - TimeUnixNano: uint64(es[i].Time.UnixNano()), + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. 
Attributes: KeyValues(es[i].Attributes), DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index 3993df927d..2171bee3c8 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -229,7 +229,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context } if c.metadata.Len() > 0 { - ctx = metadata.NewOutgoingContext(ctx, c.metadata) + md := c.metadata + if outMD, ok := metadata.FromOutgoingContext(ctx); ok { + md = metadata.Join(md, outMD) + } + + ctx = metadata.NewOutgoingContext(ctx, md) } // Unify the client stopCtx with the parent. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index 8f84a79963..3ee452ef72 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -125,7 +125,7 @@ func NewGRPCConfig(opts ...GRPCOption) Config { if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } - // Priroritize GRPCCredentials over Insecure (passing both is an error). + // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { @@ -278,9 +278,7 @@ func WithEndpointURL(v string) GenericOption { cfg.Traces.Endpoint = u.Host cfg.Traces.URLPath = u.Path - if u.Scheme != "https" { - cfg.Traces.Insecure = true - } + cfg.Traces.Insecure = u.Scheme != "https" return cfg }) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 780992528d..c76bedfb16 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { - return "1.29.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go index 3a0cc42f6a..ae92a42516 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -13,7 +13,7 @@ import ( // unwrapper unwraps to return the underlying instrument implementation. 
type unwrapper interface { - Unwrap() metric.Observable + unwrap() metric.Observable } type afCounter struct { @@ -40,7 +40,7 @@ func (i *afCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afCounter) Unwrap() metric.Observable { +func (i *afCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } @@ -71,7 +71,7 @@ func (i *afUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afUpDownCounter) Unwrap() metric.Observable { +func (i *afUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } @@ -102,7 +102,7 @@ func (i *afGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *afGauge) Unwrap() metric.Observable { +func (i *afGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } @@ -133,7 +133,7 @@ func (i *aiCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiCounter) Unwrap() metric.Observable { +func (i *aiCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } @@ -164,7 +164,7 @@ func (i *aiUpDownCounter) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiUpDownCounter) Unwrap() metric.Observable { +func (i *aiUpDownCounter) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } @@ -195,7 +195,7 @@ func (i *aiGauge) setDelegate(m metric.Meter) { i.delegate.Store(ctr) } -func (i *aiGauge) Unwrap() metric.Observable { +func (i *aiGauge) unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index e3db438a09..a6acd8dca6 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -5,6 +5,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" + "context" "reflect" "sync" @@ -66,6 +67,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.meters == nil { @@ -472,8 +474,7 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) defer m.mtx.Unlock() if m.delegate != nil { - insts = unwrapInstruments(insts) - return m.delegate.RegisterCallback(f, insts...) + return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...) 
} reg := &registration{instruments: insts, function: f} @@ -487,15 +488,11 @@ func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) return reg, nil } -type wrapped interface { - unwrap() metric.Observable -} - func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { - if in, ok := inst.(wrapped); ok { + if in, ok := inst.(unwrapper); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) @@ -515,9 +512,61 @@ type registration struct { unregMu sync.Mutex } -func (c *registration) setDelegate(m metric.Meter) { - insts := unwrapInstruments(c.instruments) +type unwrapObs struct { + embedded.Observer + obs metric.Observer +} + +// unwrapFloat64Observable returns an expected metric.Float64Observable after +// unwrapping the global object. +func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok { + // Note: if the unwrapped object does not + // unwrap as an observable for either of the + // predicates here, it means an internal bug in + // this package. We avoid logging an error in + // this case, because the SDK has to try its + // own type conversion on the object. The SDK + // will see this and be forced to respond with + // its own error. + // + // This code uses a double-nested if statement + // to avoid creating a branch that is + // impossible to cover. + inst = floatObs + } + } + return inst +} + +// unwrapInt64Observable returns an expected metric.Int64Observable after +// unwrapping the global object. +func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable { + if unwrapped, ok := inst.(unwrapper); ok { + if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok { + // See the comment in unwrapFloat64Observable(). + inst = unint + } + } + return inst +} + +func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) { + uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...) +} + +func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) { + uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...) +} +func unwrapCallback(f metric.Callback) metric.Callback { + return func(ctx context.Context, obs metric.Observer) error { + return f(ctx, &unwrapObs{obs: obs}) + } +} + +func (c *registration) setDelegate(m metric.Meter) { c.unregMu.Lock() defer c.unregMu.Unlock() @@ -526,7 +575,7 @@ func (c *registration) setDelegate(m metric.Meter) { return } - reg, err := m.RegisterCallback(c.function, insts...) + reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
if err != nil { GetErrorHandler().Handle(err) return diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go index e31f442b48..ac65262c65 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -87,6 +87,7 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name: name, version: c.InstrumentationVersion(), schema: c.SchemaURL(), + attrs: c.InstrumentationAttributes(), } if p.tracers == nil { @@ -102,7 +103,12 @@ func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T return t } -type il struct{ name, version, schema string } +type il struct { + name string + version string + schema string + attrs attribute.Set +} // tracer is a placeholder for a trace.Tracer. // diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go index 728115045b..34852a47b2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -3,6 +3,8 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" +import "go.opentelemetry.io/otel/attribute" + // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the @@ -12,4 +14,6 @@ type Scope struct { Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string + // Attributes of the telemetry emitted by the scope. + Attributes attribute.Set } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index 95a61d61d4..c02aeefdde 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "strings" ) // ErrPartialResource is returned by a detector when complete source @@ -57,62 +56,37 @@ func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { // these errors will be returned. Otherwise, nil is returned. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( - r *Resource - errs detectErrs - err error + r *Resource + err error + e error ) for _, detector := range detectors { if detector == nil { continue } - r, err = detector.Detect(ctx) - if err != nil { - errs = append(errs, err) - if !errors.Is(err, ErrPartialResource) { + r, e = detector.Detect(ctx) + if e != nil { + err = errors.Join(err, e) + if !errors.Is(e, ErrPartialResource) { continue } } - r, err = Merge(res, r) - if err != nil { - errs = append(errs, err) + r, e = Merge(res, r) + if e != nil { + err = errors.Join(err, e) } *res = *r } - if len(errs) == 0 { - return nil - } - if errors.Is(errs, ErrSchemaURLConflict) { - // If there has been a merge conflict, ensure the resource has no - // schema URL. - res.schemaURL = "" - } - return errs -} - -type detectErrs []error - -func (e detectErrs) Error() string { - errStr := make([]string, len(e)) - for i, err := range e { - errStr[i] = fmt.Sprintf("* %s", err) - } - - format := "%d errors occurred detecting resource:\n\t%s" - return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) -} + if err != nil { + if errors.Is(err, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL. 
+ res.schemaURL = "" + } -func (e detectErrs) Unwrap() error { - switch len(e) { - case 0: - return nil - case 1: - return e[0] + err = fmt.Errorf("error detecting resource: %w", err) } - return e[1:] -} - -func (e detectErrs) Is(target error) bool { - return len(e) != 0 && errors.Is(e[0], target) + return err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 6ac1cdbf7b..cf3c88e15c 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -20,15 +20,13 @@ type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use - // the WithTelemetrySDK(nil) or WithoutBuiltin() options to - // explicitly disable them. + // resource.New() to explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the - // WithHost(nil) or WithoutBuiltin() options to explicitly - // disable them. + // resource.New() to explicitly disable them. host struct{} stringDetector struct { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go index 71386e2da4..3677c83d7d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -10,17 +10,16 @@ import ( "golang.org/x/sys/windows/registry" ) -// implements hostIDReader +// implements hostIDReader. type hostIDReaderWindows struct{} -// read reads MachineGuid from the windows registry key: -// SOFTWARE\Microsoft\Cryptography +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go index 5e3d199d78..a6a5a53c0e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -17,7 +17,6 @@ import ( func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { return "", err } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 1d399a75db..ccc97e1b66 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. 
+ clear(bsp.batch) // Erase elements to let GC collect objects bsp.batch = bsp.batch[:0] if err != nil { @@ -316,7 +317,11 @@ func (bsp *batchSpanProcessor) processQueue() { bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { - <-bsp.timer.C + // Handle both GODEBUG=asynctimerchan=[0|1] properly. + select { + case <-bsp.timer.C: + default: + } } if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 821c83faa1..8c308dd60a 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -12,25 +12,26 @@ import ( // evictedQueue is a FIFO queue with a configurable capacity. type evictedQueue[T any] struct { - queue []T - capacity int - droppedCount int - logDropped func() + queue []T + capacity int + droppedCount int + logDroppedMsg string + logDroppedOnce sync.Once } func newEvictedQueueEvent(capacity int) evictedQueue[Event] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Event]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Event") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Event", } } func newEvictedQueueLink(capacity int) evictedQueue[Link] { // Do not pre-allocate queue, do this lazily. return evictedQueue[Link]{ - capacity: capacity, - logDropped: sync.OnceFunc(func() { global.Warn("limit reached: dropping trace trace.Link") }), + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Link", } } @@ -53,6 +54,10 @@ func (eq *evictedQueue[T]) add(value T) { eq.queue = append(eq.queue, value) } +func (eq *evictedQueue[T]) logDropped() { + eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) +} + // copy returns a copy of the evictedQueue. func (eq *evictedQueue[T]) copy() []T { return slices.Clone(eq.queue) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index 14c2e5bebd..185aa7c08f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -139,9 +139,10 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T name = defaultTracerName } is := instrumentation.Scope{ - Name: name, - Version: c.InstrumentationVersion(), - SchemaURL: c.SchemaURL(), + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + Attributes: c.InstrumentationAttributes(), } t, ok := func() (trace.Tracer, bool) { @@ -168,7 +169,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. 
- global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL, "attributes", is.Attributes) } return t } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 4945f50830..17f883c2c8 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -174,6 +174,17 @@ func (s *recordingSpan) IsRecording() bool { s.mu.Lock() defer s.mu.Unlock() + return s.isRecording() +} + +// isRecording returns if this span is being recorded. If this span has ended +// this will return false. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) isRecording() bool { + if s == nil { + return false + } return s.endTime.IsZero() } @@ -182,11 +193,15 @@ func (s *recordingSpan) IsRecording() bool { // included in the set status when the code is for an error. If this span is // not being recorded than this method does nothing. func (s *recordingSpan) SetStatus(code codes.Code, description string) { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } if s.status.Code > code { return } @@ -210,12 +225,15 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { // attributes the span is configured to have, the last added attributes will // be dropped. func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { - if !s.IsRecording() { + if s == nil || len(attributes) == 0 { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { @@ -233,7 +251,7 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. - s.attributes = slices.Grow(s.attributes, len(s.attributes)+len(attributes)) + s.attributes = slices.Grow(s.attributes, len(attributes)) for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. @@ -280,13 +298,17 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. - sum := len(attrs) + len(s.attributes) - s.attributes = slices.Grow(s.attributes, min(sum, limit)) + + // max size = limit + maxCap := min(len(attrs)+len(s.attributes), limit) + if cap(s.attributes) < maxCap { + s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) + } for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. @@ -296,6 +318,7 @@ func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { if idx, ok := exists[a.Key]; ok { // Perform all updates before dropping, even when at capacity. 
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes[idx] = a continue } @@ -386,9 +409,10 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { // the span's duration in case some operation below takes a while. et := monotonicEndTime(s.startTime) - // Do relative expensive check now that we have an end time and see if we - // need to do any more processing. - if !s.IsRecording() { + // Lock the span now that we have an end time and see if we need to do any more processing. + s.mu.Lock() + if !s.isRecording() { + s.mu.Unlock() return } @@ -413,10 +437,11 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } if s.executionTracerTaskEnd != nil { + s.mu.Unlock() s.executionTracerTaskEnd() + s.mu.Lock() } - s.mu.Lock() // Setting endTime to non-zero marks the span as ended and not recording. if config.Timestamp().IsZero() { s.endTime = et @@ -450,7 +475,13 @@ func monotonicEndTime(start time.Time) time.Time { // does not change the Span status. If this span is not being recorded or err is nil // than this method does nothing. func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { - if s == nil || err == nil || !s.IsRecording() { + if s == nil || err == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } @@ -486,14 +517,23 @@ func recordStackTrace() string { } // AddEvent adds an event with the provided name and options. If this span is -// not being recorded than this method does nothing. +// not being recorded then this method does nothing. func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { - if !s.IsRecording() { + if s == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { return } s.addEvent(name, o...) } +// addEvent adds an event with the provided name and options. +// +// This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} @@ -510,20 +550,21 @@ func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { e.Attributes = e.Attributes[:limit] } - s.mu.Lock() s.events.add(e) - s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than // this method does nothing. func (s *recordingSpan) SetName(name string) { - if !s.IsRecording() { + if s == nil { return } s.mu.Lock() defer s.mu.Unlock() + if !s.isRecording() { + return + } s.name = name } @@ -579,29 +620,26 @@ func (s *recordingSpan) Attributes() []attribute.KeyValue { func (s *recordingSpan) dedupeAttrs() { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. - exists := make(map[attribute.Key]int) - s.dedupeAttrsFromRecord(&exists) + exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) } // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // using record as the record of unique attribute keys to their index. // // This method assumes s.mu.Lock is held by the caller. -func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { +func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) { // Use the fact that slices share the same backing array. 
unique := s.attributes[:0] for _, a := range s.attributes { - if idx, ok := (*record)[a.Key]; ok { + if idx, ok := record[a.Key]; ok { unique[idx] = a } else { unique = append(unique, a) - (*record)[a.Key] = len(unique) - 1 + record[a.Key] = len(unique) - 1 } } - // s.attributes have element types of attribute.KeyValue. These types are - // not pointers and they themselves do not contain pointer fields, - // therefore the duplicate values do not need to be zeroed for them to be - // garbage collected. + clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects. s.attributes = unique } @@ -657,7 +695,7 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() { + if s == nil { return } if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && @@ -665,6 +703,12 @@ func (s *recordingSpan) AddLink(link trace.Link) { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} // Discard attributes over limit. @@ -678,9 +722,7 @@ func (s *recordingSpan) AddLink(link trace.Link) { l.Attributes = l.Attributes[:limit] } - s.mu.Lock() s.links.add(l) - s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -755,12 +797,16 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { } func (s *recordingSpan) addChild() { - if !s.IsRecording() { + if s == nil { return } + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } s.childSpanCount++ - s.mu.Unlock() } func (*recordingSpan) private() {} diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index b7cede891c..0b214d3fe9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { - return "1.29.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 6d3c7b1f40..59e2481613 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.31.0" + return "1.32.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index cdebdb5eb7..c04b12f6b7 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,19 +3,13 @@ module-sets: stable-v1: - version: v1.31.0 + version: v1.32.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/dice - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/otlp/otlptrace @@ -29,12 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.53.0 + version: v0.54.0 modules: - - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.7.0 + version: v0.8.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +35,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.10 + version: v0.0.11 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index de1b98211a..2d7486804f 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -877,6 +877,11 @@ func (in *input) parseLineBlock(start Position, token []string, lparen token) *L in.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", in.filename, x.Start.Line, x.Start.LineRune)) case ')': rparen := in.lex() + // Don't preserve blank lines (denoted by a single empty comment, added above) + // at the end of the block. + if len(comments) == 1 && comments[0] == (Comment{}) { + comments = nil + } x.RParen.Before = comments x.RParen.Pos = rparen.pos if !in.peek().isEOL() { diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 8f9e592f87..737d6876d5 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } if fd == nil { // Field is unknown. 
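The two decoder hunks here (protojson above, prototext below) drop the `flags.ProtoLegacy` branch that reset `fd` when a weak reference was not linked in; with weak-field support removed from protobuf-go, such fields now fall straight into the ordinary unknown-field handling. As an independent sketch of that handling, not code from this patch, protojson either rejects or discards an unknown JSON field depending on `UnmarshalOptions.DiscardUnknown` (the field name below is invented; `google.protobuf.Empty` declares no fields):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	data := []byte(`{"no_such_field": 1}`)

	var m emptypb.Empty
	// Default behavior: an unknown field is a decode error.
	if err := protojson.Unmarshal(data, &m); err != nil {
		fmt.Println("strict:", err)
	}

	// DiscardUnknown skips fields the message descriptor does not declare.
	if err := (protojson.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(data, &m); err != nil {
		fmt.Println("unexpected:", err)
	}
}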
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac42..b53805056a 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. if fd == nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c76044..669133d04d 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. 
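The tag.go hunk above removes `weak=` from the options that `Unmarshal` parses and `Marshal` emits, so generated `protobuf:"..."` struct tags now carry only the remaining comma-separated options (wire type, field number, cardinality, `name=`, `json=`, `packed`, `def=`, and so on). A minimal sketch of that tag format, using an invented hand-written struct in place of generated code:

package main

import (
	"fmt"
	"reflect"
)

// example stands in for a generated message struct; only the tag
// format matters for this illustration.
type example struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	// Prints the option list handed to tag parsing:
	// bytes,1,opt,name=name,proto3
	fmt.Println(f.Tag.Get("protobuf"))
}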
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 378b826faa..688aabe434 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -275,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b327c..d4c94458bd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44c..e1b4130bd2 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". 
// // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd348..a06ccabc2f 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1f..99bb95bafd 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8c1..d14d7d93cc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - 
unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0bae9..229c698013 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f2f..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66eaf..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 2f7b363ec4..f78b57b046 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -118,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 88c16ae5b7..41c1f74ef8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -45,19 +45,16 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf var childMessage *MessageInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) case fd.Message() != nil && !fd.IsMap(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) if fd.IsList() { childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) } else { childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) } default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &coderFieldInfo{ diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a651..e4580b3ac2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go index e8fb6c35b4..c7de31e243 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ fmi := f.validation.mi if fmi == nil { fd := mi.Desc.Fields().ByNumber(f.num) - if fd == nil || !fd.IsWeak() { + if fd == nil { return out, ValidationUnknown } messageName := fd.Message().FullName() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b4..a51dffbe29 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index fa10a0f5cc..d50423dcb7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -120,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -129,8 +127,6 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset @@ -148,7 +144,6 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, lazyOffset: invalidOffset, @@ -165,28 +160,23 @@ fieldLoop: switch f := t.Field(i); 
f.Name { case genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } case "lazyFields", "XXX_lazyUnmarshalInfo": - si.lazyOffset = offsetOf(f, mi.Exporter) + si.lazyOffset = offsetOf(f) case "XXX_presence": - si.presenceOffset = offsetOf(f, mi.Exporter) + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { @@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d7ec53f074..dd55e8e009 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool { usePresence, _ := usePresenceForField(si, fd) switch { - case fd.IsWeak(): - // Weak fields are no different for opaque. - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): // Oneofs are no different for opaque. 
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) @@ -142,7 +139,7 @@ func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflec if ft.Kind() != reflect.Map { panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) } - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) conv := NewConverter(ft, fd) return fieldInfo{ fieldDesc: fd, @@ -196,7 +193,7 @@ func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd prot panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(reflect.PtrTo(ft), fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) return fieldInfo{ fieldDesc: fd, @@ -246,7 +243,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd pro panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() return fieldInfo{ @@ -339,7 +336,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -411,7 +408,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref deref = true } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) var getter func(p pointer) protoreflect.Value if !nullable { @@ -480,7 +477,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() elemType := fs.Type.Elem() @@ -620,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) ( switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): return false, false - case fd.IsWeak(): - return false, false case fd.IsMap(): return false, false case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 31c19b54f8..0d20132fa2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index a740646205..68d4ae32ec 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -269,7 +266,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) // Generate specialized getter functions to avoid going through reflect.Value if nullable { @@ -332,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - 
p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -419,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -466,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -479,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -497,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 041ebde2de..62f8bf663e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -22,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. 
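Alongside the weak-field removal, the hand-rolled isZero helper is deleted in favor of reflect.Value.IsZero, which has been available since Go 1.13 (exactly the TODO the old helper carried), so fieldInfoForMessage's has closure now calls rv.IsZero() directly for non-pointer message fields. A tiny sketch of the replacement call:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	type msg struct{ N int32 }
	// reflect.Value.IsZero reports whether the value equals the zero
	// value of its type, covering the struct case the old helper handled.
	fmt.Println(reflect.ValueOf(msg{}).IsZero())     // true
	fmt.Println(reflect.ValueOf(msg{N: 7}).IsZero()) // false
}
```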
-func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index b534a3d6db..7b2995dde5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -320,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba94..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. 
-type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 386c823aa6..01efc33030 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 2 + Patch = 5 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index a3b5e142d2..4cbf1aeaf7 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. 
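With fieldInfoForWeakMessage and weak.go gone, and the weak special case removed from unmarshalMessageSlow, bytes for a field number the descriptor cannot resolve simply take the ordinary unknown-field path; the Patch bump in version.go above pins the vendored copy at v1.36.5, which ships these removals. A sketch of that behavior, using emptypb only as a convenient message with no declared fields:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Field 1, varint wire type, value 150 — unknown to google.protobuf.Empty.
	raw := []byte{0x08, 0x96, 0x01}

	m := &emptypb.Empty{}
	if err := proto.Unmarshal(raw, m); err != nil {
		panic(err)
	}
	// The bytes are retained verbatim as unknown fields.
	fmt.Printf("%x\n", m.ProtoReflect().GetUnknown()) // 089601
}
```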
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 69a0505091..823dbf3ba6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -132,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index ebcb4a8ab1..9da34998b1 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab29c..ff692436e9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), 
partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). 
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2ebd..c343d9227b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return 
errors.New("extension field %q is not packable", x.FullName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index bf0a0ccdee..697a61b290 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -125,16 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } - if sep := goFeatures.StripEnumPrefix; sep != nil { - parentFS.StripEnumPrefix = int(*sep) - } - if al := goFeatures.ApiLevel; al != nil { - parentFS.APILevel = int(*al) - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. 
+ fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d4001..9b880aa8c9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf8..cd7fbc87a4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a551e7ae94..a516337674 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. 
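The rewritten mergeEditionFeatures above stops calling proto.GetExtension and instead walks the GoFeatures message through protoreflect, looking each field up by number and checking kind, cardinality, and presence before reading — so dynamicpb messages work (golang/protobuf#1669) and a malformed descriptor from an untrusted source cannot panic a service. A sketch of that defensive pattern; readBoolField is an illustrative helper, not library API, and the FieldOptions.deprecated field (number 3) is used only as a handy singular bool:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// readBoolField returns (value, true) only if field num exists, is a
// singular bool, and is set — mirroring the hardened checks in the diff.
func readBoolField(m protoreflect.Message, num protoreflect.FieldNumber) (bool, bool) {
	fd := m.Descriptor().Fields().ByNumber(num)
	if fd == nil || fd.IsList() || fd.Kind() != protoreflect.BoolKind || !m.Has(fd) {
		return false, false
	}
	return m.Get(fd).Bool(), true
}

func main() {
	opts := &descriptorpb.FieldOptions{Deprecated: proto.Bool(true)}
	v, ok := readBoolField(opts.ProtoReflect(), 3)
	fmt.Println(v, ok) // true true
}
```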
@@ -4360,7 +4361,7 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -5130,16 +5131,16 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5292,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5304,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go index c432817bb9..8e759fc9f7 100644 --- a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -28,11 +28,7 @@ type extField struct { type Types struct { // atomicExtFiles is used with sync/atomic and hence must be the first word // of the struct to guarantee 64-bit alignment. - // - // TODO(stapelberg): once we only support Go 1.19 and newer, switch this - // field to be of type atomic.Uint64 to guarantee alignment on - // stack-allocated values, too. 
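The generated-code churn that follows is one mechanical change applied to every .pb.go file in this update: the raw descriptor moves from a []byte to a string (string([]byte{...})), consumers convert it back with the zero-copy unsafe.Slice(unsafe.StringData(...), len(...)) idiom, and the rawDesc = nil reset in init is dropped, since string-backed data is immutable. The same batch replaces dynamicpb's manually aligned atomicExtFiles uint64 with atomic.Uint64, whose methods guarantee 64-bit alignment even on stack-allocated values (per the deleted TODO). A sketch of the string-to-bytes conversion; the literal below is an illustrative prefix only:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesOf wraps a string's backing bytes in a []byte header without
// copying. The slice aliases immutable string memory and must be
// treated as read-only — the generated code only ever reads from it.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	raw := "\x0a\x19google/protobuf/any.proto"
	b := bytesOf(raw)
	fmt.Println(len(b), b[0]) // 27 10
}
```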
- atomicExtFiles uint64 + atomicExtFiles atomic.Uint64 extMu sync.Mutex files *protoregistry.Files @@ -90,7 +86,7 @@ func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.Ex func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { // Construct the extension number map lazily, since not every user will need it. // Update the map if new files are added to the registry. - if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) { t.updateExtensions() } xd := t.extensionsByMessage[extField{message, field}] @@ -133,10 +129,10 @@ func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { func (t *Types) updateExtensions() { t.extMu.Lock() defer t.extMu.Unlock() - if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) { return } - defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + defer t.atomicExtFiles.Store(uint64(t.files.NumFiles())) t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { t.registerExtensions(fd.Extensions()) t.registerExtensionsInMessages(fd.Messages()) diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index e0b72eaf92..28d24bad79 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,6 +16,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) type GoFeatures_APILevel int32 @@ -227,7 +228,7 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, @@ -283,16 +284,16 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } @@ -326,7 +327,7 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), NumEnums: 2, NumMessages: 1, NumExtensions: 1, @@ -339,7 +340,6 @@ func file_google_protobuf_go_features_proto_init() { ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 191552cce0..497da66e91 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 
34d76e6cd9..193880d181 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -288,7 +289,7 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -305,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -340,7 +341,7 @@ func file_google_protobuf_duration_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -351,7 +352,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 7fcdd382cc..a5b8657c4b 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -85,7 +86,7 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ +var file_google_protobuf_empty_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, @@ -98,16 +99,16 @@ var file_google_protobuf_empty_proto_rawDesc = []byte{ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -133,7 +134,7 @@ func file_google_protobuf_empty_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -144,7 +145,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e5d7da38c2..041feb0f3e 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -83,6 +83,7 @@ import ( sort "sort" strings "strings" sync "sync" + unsafe "unsafe" ) // `FieldMask` represents a set of symbolic field paths, for example: @@ -503,7 +504,7 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ +var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -519,16 +520,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - 
file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc + file_google_protobuf_field_mask_proto_rawDescData []byte ) func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc))) }) return file_google_protobuf_field_mask_proto_rawDescData } @@ -554,7 +555,7 @@ func file_google_protobuf_field_mask_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -565,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() { MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, }.Build() File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil file_google_protobuf_field_mask_proto_goTypes = nil file_google_protobuf_field_mask_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index f2c53ea337..ecdd31ab53 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -128,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -671,7 +672,7 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = []byte{ +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, @@ -719,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -773,7 +774,7 @@ func 
file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -785,7 +786,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 9550109aa3..00ac835c0b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -297,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, @@ -314,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -349,7 +350,7 @@ func file_google_protobuf_timestamp_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -360,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - 
file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 15b424ec12..5de5301063 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -48,6 +48,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // Wrapper message for `double`. @@ -529,7 +530,7 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = []byte{ +var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -563,16 +564,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once - file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc + file_google_protobuf_wrappers_proto_rawDescData []byte ) func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { - file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc))) }) return file_google_protobuf_wrappers_proto_rawDescData } @@ -606,7 +607,7 @@ func file_google_protobuf_wrappers_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)), NumEnums: 0, NumMessages: 9, NumExtensions: 0, @@ -617,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() { MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, }.Build() File_google_protobuf_wrappers_proto = out.File - file_google_protobuf_wrappers_proto_rawDesc = nil file_google_protobuf_wrappers_proto_goTypes = nil file_google_protobuf_wrappers_proto_depIdxs = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 65451ccd4b..7293e19e11 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cel.dev/expr v0.18.0 +# cel.dev/expr v0.19.0 ## explicit; go 1.21.1 cel.dev/expr # github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 @@ -118,7 +118,7 @@ github.com/containerd/containerd/api/types github.com/containerd/continuity/devices github.com/containerd/continuity/fs github.com/containerd/continuity/sysx -# 
github.com/containerd/errdefs v0.3.0 +# github.com/containerd/errdefs v1.0.0 ## explicit; go 1.20 github.com/containerd/errdefs # github.com/containerd/errdefs/pkg v0.3.0 @@ -138,8 +138,8 @@ github.com/containerd/ttrpc # github.com/containerd/typeurl/v2 v2.2.3 ## explicit; go 1.21 github.com/containerd/typeurl/v2 -# github.com/containers/common v0.61.0 -## explicit; go 1.22.6 +# github.com/containers/common v0.62.0 +## explicit; go 1.22.8 github.com/containers/common/pkg/auth github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/completion @@ -212,7 +212,7 @@ github.com/davecgh/go-spew/spew # github.com/distribution/reference v0.6.0 ## explicit; go 1.20 github.com/distribution/reference -# github.com/docker/cli v27.5.1+incompatible +# github.com/docker/cli v28.0.0+incompatible ## explicit github.com/docker/cli/cli/config github.com/docker/cli/cli/config/configfile @@ -303,7 +303,7 @@ github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys -# github.com/golang-migrate/migrate/v4 v4.18.1 +# github.com/golang-migrate/migrate/v4 v4.18.2 ## explicit; go 1.22.0 github.com/golang-migrate/migrate/v4/source github.com/golang-migrate/migrate/v4/source/file @@ -380,7 +380,7 @@ github.com/gorilla/mux # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 +# github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 ## explicit; go 1.21 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime @@ -562,7 +562,7 @@ github.com/operator-framework/api/pkg/validation github.com/operator-framework/api/pkg/validation/errors github.com/operator-framework/api/pkg/validation/interfaces github.com/operator-framework/api/pkg/validation/internal -# github.com/operator-framework/operator-registry v1.50.0 +# github.com/operator-framework/operator-registry v1.51.0 ## explicit; go 1.23.0 github.com/operator-framework/operator-registry/alpha/model github.com/operator-framework/operator-registry/alpha/property @@ -639,9 +639,12 @@ github.com/stretchr/testify/require # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# go.etcd.io/bbolt v1.3.11 -## explicit; go 1.22 +# go.etcd.io/bbolt v1.4.0 +## explicit; go 1.23 go.etcd.io/bbolt +go.etcd.io/bbolt/errors +go.etcd.io/bbolt/internal/common +go.etcd.io/bbolt/internal/freelist # go.etcd.io/etcd/api/v3 v3.5.16 ## explicit; go 1.22 go.etcd.io/etcd/api/v3/authpb @@ -676,13 +679,13 @@ go.opencensus.io/trace/tracestate ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 -## explicit; go 1.21 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 +## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.31.0 +# go.opentelemetry.io/otel v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -699,31 +702,31 @@ 
go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform -# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry -# go.opentelemetry.io/otel/metric v1.31.0 +# go.opentelemetry.io/otel/metric v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.29.0 -## explicit; go 1.21 +# go.opentelemetry.io/otel/sdk v1.32.0 +## explicit; go 1.22 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace -# go.opentelemetry.io/otel/trace v1.31.0 +# go.opentelemetry.io/otel/trace v1.32.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -758,11 +761,11 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/nacl/secretbox golang.org/x/crypto/salsa20/salsa -# golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 +# golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 ## explicit; go 1.22.0 golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.22.0 +# golang.org/x/mod v0.23.0 ## explicit; go 1.22.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile @@ -870,7 +873,7 @@ gomodules.xyz/jsonpatch/v2 # google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 +# google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -880,7 +883,7 @@ google.golang.org/genproto/googleapis/api/httpbody ## explicit; go 1.22 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.69.4 => google.golang.org/grpc v1.63.2 +# google.golang.org/grpc v1.70.0 => google.golang.org/grpc v1.63.2 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -936,7 +939,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.2 +# google.golang.org/protobuf v1.36.5 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson
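modules.txt records the refreshed vendor tree: operator-registry v1.51.0 plus the transitive bumps above (bbolt v1.4.0, grpc-gateway v2.23.0, the otel v1.32.0 family, protobuf v1.36.5, and so on). A sketch of one way a consuming binary can confirm which versions actually linked, via runtime/debug; the module path and expected version string come from the hunk above:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	bi, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info (not built from a module)")
		return
	}
	for _, dep := range bi.Deps {
		if dep.Path == "google.golang.org/protobuf" {
			fmt.Println(dep.Path, dep.Version) // expected: v1.36.5
		}
	}
}
```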